Schema (column name and type). Each `qsc_*` signal appears twice in the schema, once with the `_quality_signal` suffix and once without, so the two variants are shown side by side in the second table.

| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |

| Signal | _quality_signal type | unsuffixed type |
|---|---|---|
| qsc_code_num_words | int64 | int64 |
| qsc_code_num_chars | float64 | int64 |
| qsc_code_mean_word_length | float64 | int64 |
| qsc_code_frac_words_unique | float64 | null |
| qsc_code_frac_chars_top_2grams | float64 | int64 |
| qsc_code_frac_chars_top_3grams | float64 | int64 |
| qsc_code_frac_chars_top_4grams | float64 | int64 |
| qsc_code_frac_chars_dupe_5grams | float64 | int64 |
| qsc_code_frac_chars_dupe_6grams | float64 | int64 |
| qsc_code_frac_chars_dupe_7grams | float64 | int64 |
| qsc_code_frac_chars_dupe_8grams | float64 | int64 |
| qsc_code_frac_chars_dupe_9grams | float64 | int64 |
| qsc_code_frac_chars_dupe_10grams | float64 | int64 |
| qsc_code_frac_chars_replacement_symbols | float64 | int64 |
| qsc_code_frac_chars_digital | float64 | int64 |
| qsc_code_frac_chars_whitespace | float64 | int64 |
| qsc_code_size_file_byte | float64 | int64 |
| qsc_code_num_lines | float64 | int64 |
| qsc_code_num_chars_line_max | float64 | int64 |
| qsc_code_num_chars_line_mean | float64 | int64 |
| qsc_code_frac_chars_alphabet | float64 | int64 |
| qsc_code_frac_chars_comments | float64 | int64 |
| qsc_code_cate_xml_start | float64 | int64 |
| qsc_code_frac_lines_dupe_lines | float64 | int64 |
| qsc_code_cate_autogen | float64 | int64 |
| qsc_code_frac_lines_long_string | float64 | int64 |
| qsc_code_frac_chars_string_length | float64 | int64 |
| qsc_code_frac_chars_long_word_length | float64 | int64 |
| qsc_code_frac_lines_string_concat | float64 | null |
| qsc_code_cate_encoded_data | float64 | int64 |
| qsc_code_frac_chars_hex_words | float64 | int64 |
| qsc_code_frac_lines_prompt_comments | float64 | int64 |
| qsc_code_frac_lines_assert | float64 | int64 |
| qsc_codepython_cate_ast | float64 | int64 |
| qsc_codepython_frac_lines_func_ratio | float64 | int64 |
| qsc_codepython_cate_var_zero | bool | int64 |
| qsc_codepython_frac_lines_pass | float64 | int64 |
| qsc_codepython_frac_lines_import | float64 | int64 |
| qsc_codepython_frac_lines_simplefunc | float64 | int64 |
| qsc_codepython_score_lines_no_logic | float64 | int64 |
| qsc_codepython_frac_lines_print | float64 | int64 |

| Column | Type |
|---|---|
| effective | string |
| hits | int64 |
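In the rows below, the `_quality_signal` column of each `qsc_*` signal carries the measured value, while the unsuffixed column holds what appears to be a per-signal 0/1 hit flag; in every row shown here, `hits` equals the number of flags set. As a minimal sketch of how rows with this schema might be consumed (the JSON-lines container and the file name `rows.jsonl` are assumptions for illustration, not something this page specifies):

```python
import json

# Hypothetical container: one JSON object per line, with the fields
# listed in the schema above. The file name is an assumption.
with open("rows.jsonl") as f:
    for line in f:
        row = json.loads(line)
        # Identifying metadata for the source file.
        print(row["hexsha"], row["max_stars_repo_name"], row["max_stars_repo_path"])
        # Count the per-signal hit flags; for the rows on this page,
        # this total matches the `hits` column.
        flag_cols = [k for k in row
                     if k.startswith("qsc_") and not k.endswith("_quality_signal")]
        hits = sum(1 for k in flag_cols if row[k] == 1)
        assert hits == row["hits"]
        # Example filter on the measured signals: keep files that parse
        # as a Python AST and are not dominated by duplicated 10-grams.
        if (row["qsc_codepython_cate_ast_quality_signal"] == 1
                and row["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.5):
            print("kept:", len(row["content"]), "chars")
```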
Row 1

| Field | Value |
|---|---|
| hexsha | 86e0d60fa6d4237e4bccb547f8a776c77f7c4578 |
| size | 175 |
| ext | py |
| lang | Python |
| max_stars_repo_path | ulmo/usgs/eddn/__init__.py |
| max_stars_repo_name | sblack-usu/ulmo |
| max_stars_repo_head_hexsha | 3213bf0302b44e77abdff1f3f66e7f1083571ce8 |
| max_stars_repo_licenses | ["BSD-3-Clause"] |
| max_stars_count | 3 |
| max_stars_repo_stars_event_min_datetime | 2017-09-17T21:27:48.000Z |
| max_stars_repo_stars_event_max_datetime | 2022-03-15T12:58:53.000Z |
| max_issues_repo_path | ulmo/usgs/eddn/__init__.py |
| max_issues_repo_name | sblack-usu/ulmo |
| max_issues_repo_head_hexsha | 3213bf0302b44e77abdff1f3f66e7f1083571ce8 |
| max_issues_repo_licenses | ["BSD-3-Clause"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | ulmo/usgs/eddn/__init__.py |
| max_forks_repo_name | sblack-usu/ulmo |
| max_forks_repo_head_hexsha | 3213bf0302b44e77abdff1f3f66e7f1083571ce8 |
| max_forks_repo_licenses | ["BSD-3-Clause"] |
| max_forks_count | 3 |
| max_forks_repo_forks_event_min_datetime | 2021-02-23T06:26:00.000Z |
| max_forks_repo_forks_event_max_datetime | 2021-02-23T06:26:18.000Z |

content:

```python
"""
`USGS Emergency Data Distribution Network`_ services
.. _USGS Emergency Data Distribution Network: http://eddn.usgs.gov/
"""
from .core import get_data, decode
```

| Field | Value |
|---|---|
| avg_line_length | 19.444444 |
| max_line_length | 71 |
| alphanum_fraction | 0.714286 |

| Signal | _quality_signal value | unsuffixed value |
|---|---|---|
| qsc_code_num_words | 21 | 1 |
| qsc_code_num_chars | 175 | 0 |
| qsc_code_mean_word_length | 5.809524 | 0 |
| qsc_code_frac_words_unique | 0.666667 | null |
| qsc_code_frac_chars_top_2grams | 0.213115 | 1 |
| qsc_code_frac_chars_top_3grams | 0.278689 | 1 |
| qsc_code_frac_chars_top_4grams | 0.47541 | 1 |
| qsc_code_frac_chars_dupe_5grams | 0.590164 | 0 |
| qsc_code_frac_chars_dupe_6grams | 0 | 0 |
| qsc_code_frac_chars_dupe_7grams | 0 | 0 |
| qsc_code_frac_chars_dupe_8grams | 0 | 0 |
| qsc_code_frac_chars_dupe_9grams | 0 | 0 |
| qsc_code_frac_chars_dupe_10grams | 0 | 0 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0 | 0 |
| qsc_code_frac_chars_whitespace | 0.177143 | 0 |
| qsc_code_size_file_byte | 175 | 0 |
| qsc_code_num_lines | 8 | 1 |
| qsc_code_num_chars_line_max | 72 | 0 |
| qsc_code_num_chars_line_mean | 21.875 | 0 |
| qsc_code_frac_chars_alphabet | 0.847222 | 0 |
| qsc_code_frac_chars_comments | 0.697143 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0 | 0 |
| qsc_codepython_cate_var_zero | true | 1 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 1 | 1 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 1 | 1 |
| qsc_codepython_frac_lines_print | 0 | 0 |

| Field | Value |
|---|---|
| effective | 0 |
| hits | 8 |
Row 2

| Field | Value |
|---|---|
| hexsha | 86e93a2a980cbbba140d2135b92fc06504ed8e60 |
| size | 164 |
| ext | py |
| lang | Python |
| max_stars_repo_path | badwing/character/__init__.py |
| max_stars_repo_name | kfields/badwing |
| max_stars_repo_head_hexsha | 5f53c98cbb6fca8390e1632fa559f5201861365b |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | 3 |
| max_stars_repo_stars_event_min_datetime | 2020-03-23T06:43:25.000Z |
| max_stars_repo_stars_event_max_datetime | 2022-02-18T16:35:56.000Z |
| max_issues_repo_path | badwing/character/__init__.py |
| max_issues_repo_name | kfields/badwing |
| max_issues_repo_head_hexsha | 5f53c98cbb6fca8390e1632fa559f5201861365b |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | 2 |
| max_issues_repo_issues_event_min_datetime | 2020-03-26T02:05:36.000Z |
| max_issues_repo_issues_event_max_datetime | 2021-08-02T19:13:06.000Z |
| max_forks_repo_path | badwing/character/__init__.py |
| max_forks_repo_name | kfields/badwing |
| max_forks_repo_head_hexsha | 5f53c98cbb6fca8390e1632fa559f5201861365b |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:

```python
#from badwing.character.character import Character
from badwing.character.controller import CharacterController
from badwing.character.sprite import CharacterSprite
```

| Field | Value |
|---|---|
| avg_line_length | 54.666667 |
| max_line_length | 60 |
| alphanum_fraction | 0.890244 |

| Signal | _quality_signal value | unsuffixed value |
|---|---|---|
| qsc_code_num_words | 18 | 1 |
| qsc_code_num_chars | 164 | 0 |
| qsc_code_mean_word_length | 8.111111 | 0 |
| qsc_code_frac_words_unique | 0.444444 | null |
| qsc_code_frac_chars_top_2grams | 0.226027 | 1 |
| qsc_code_frac_chars_top_3grams | 0.410959 | 1 |
| qsc_code_frac_chars_top_4grams | 0 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0 | 0 |
| qsc_code_frac_chars_dupe_6grams | 0 | 0 |
| qsc_code_frac_chars_dupe_7grams | 0 | 0 |
| qsc_code_frac_chars_dupe_8grams | 0 | 0 |
| qsc_code_frac_chars_dupe_9grams | 0 | 0 |
| qsc_code_frac_chars_dupe_10grams | 0 | 0 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0 | 0 |
| qsc_code_frac_chars_whitespace | 0.067073 | 0 |
| qsc_code_size_file_byte | 164 | 0 |
| qsc_code_num_lines | 3 | 1 |
| qsc_code_num_chars_line_max | 61 | 0 |
| qsc_code_num_chars_line_mean | 54.666667 | 0 |
| qsc_code_frac_chars_alphabet | 0.954248 | 0 |
| qsc_code_frac_chars_comments | 0.29878 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0 | 0 |
| qsc_codepython_cate_var_zero | true | 1 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 1 | 1 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 1 | 1 |
| qsc_codepython_frac_lines_print | 0 | 0 |

| Field | Value |
|---|---|
| effective | 0 |
| hits | 7 |
Row 3

| Field | Value |
|---|---|
| hexsha | 86f0397920108fbc404982de685e915fe09cb17d |
| size | 122 |
| ext | py |
| lang | Python |
| max_stars_repo_path | bokeh/server/views/deps.py |
| max_stars_repo_name | tswicegood/bokeh |
| max_stars_repo_head_hexsha | 2e74be5c9288306896e8c76af2e14a8c7513e0e3 |
| max_stars_repo_licenses | ["BSD-3-Clause"] |
| max_stars_count | 2 |
| max_stars_repo_stars_event_min_datetime | 2021-09-01T12:36:06.000Z |
| max_stars_repo_stars_event_max_datetime | 2021-11-17T10:48:36.000Z |
| max_issues_repo_path | bokeh/server/views/deps.py |
| max_issues_repo_name | brian15co/bokeh |
| max_issues_repo_head_hexsha | 6cecb7211277b9d838039d0eb15e50a10f9ac3d1 |
| max_issues_repo_licenses | ["BSD-3-Clause"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | bokeh/server/views/deps.py |
| max_forks_repo_name | brian15co/bokeh |
| max_forks_repo_head_hexsha | 6cecb7211277b9d838039d0eb15e50a10f9ac3d1 |
| max_forks_repo_licenses | ["BSD-3-Clause"] |
| max_forks_count | 2 |
| max_forks_repo_forks_event_min_datetime | 2015-12-22T04:13:10.000Z |
| max_forks_repo_forks_event_max_datetime | 2021-07-06T21:18:04.000Z |

content:

```python
from . import backbone, data, main, plugins, statics
# this just shuts up pyflakes
backbone, data, main, plugins, statics
```

| Field | Value |
|---|---|
| avg_line_length | 30.5 |
| max_line_length | 52 |
| alphanum_fraction | 0.762295 |

| Signal | _quality_signal value | unsuffixed value |
|---|---|---|
| qsc_code_num_words | 17 | 1 |
| qsc_code_num_chars | 122 | 0 |
| qsc_code_mean_word_length | 5.470588 | 0 |
| qsc_code_frac_words_unique | 0.705882 | null |
| qsc_code_frac_chars_top_2grams | 0.258065 | 1 |
| qsc_code_frac_chars_top_3grams | 0.344086 | 1 |
| qsc_code_frac_chars_top_4grams | 0.494624 | 1 |
| qsc_code_frac_chars_dupe_5grams | 0.645161 | 0 |
| qsc_code_frac_chars_dupe_6grams | 0 | 0 |
| qsc_code_frac_chars_dupe_7grams | 0 | 0 |
| qsc_code_frac_chars_dupe_8grams | 0 | 0 |
| qsc_code_frac_chars_dupe_9grams | 0 | 0 |
| qsc_code_frac_chars_dupe_10grams | 0 | 0 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0 | 0 |
| qsc_code_frac_chars_whitespace | 0.155738 | 0 |
| qsc_code_size_file_byte | 122 | 0 |
| qsc_code_num_lines | 4 | 1 |
| qsc_code_num_chars_line_max | 53 | 0 |
| qsc_code_num_chars_line_mean | 30.5 | 0 |
| qsc_code_frac_chars_alphabet | 0.902913 | 0 |
| qsc_code_frac_chars_comments | 0.221311 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0 | 0 |
| qsc_codepython_cate_var_zero | true | 1 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 0.5 | 1 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0.5 | 0 |
| qsc_codepython_frac_lines_print | 0 | 0 |

| Field | Value |
|---|---|
| effective | 0 |
| hits | 7 |
Row 4

| Field | Value |
|---|---|
| hexsha | 86ff4f400244a21b8bb704a074e925f6590804ba |
| size | 19,264 |
| ext | py |
| lang | Python |
| max_stars_repo_path | tests/model_monitoring/monitors/test_data_quality_monitor.py |
| max_stars_repo_name | vgtom/evidently |
| max_stars_repo_head_hexsha | 543711483baa8e27dfbc81a6575d553ab57cc897 |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | tests/model_monitoring/monitors/test_data_quality_monitor.py |
| max_issues_repo_name | vgtom/evidently |
| max_issues_repo_head_hexsha | 543711483baa8e27dfbc81a6575d553ab57cc897 |
| max_issues_repo_licenses | ["Apache-2.0"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | tests/model_monitoring/monitors/test_data_quality_monitor.py |
| max_forks_repo_name | vgtom/evidently |
| max_forks_repo_head_hexsha | 543711483baa8e27dfbc81a6575d553ab57cc897 |
| max_forks_repo_licenses | ["Apache-2.0"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:

```python
from datetime import datetime
import pandas as pd
from evidently.model_monitoring import ModelMonitoring
from evidently.model_monitoring.monitors.data_quality import DataQualityMonitor
from evidently.pipeline.column_mapping import ColumnMapping
from tests.model_monitoring.helpers import collect_metrics_results
def test_monitor_id():
assert DataQualityMonitor().monitor_id() == "data_quality"
def test_data_quality_monitor_regression() -> None:
reference_data = pd.DataFrame(
{
"my_target": [1, 2, 1, 4],
"my_prediction": [2, -1, 1, -1],
"numerical_feature": [0, 2, -1, 5],
"categorical_feature": ["y", "y", "n", "u"],
"datetime_feature": [
datetime(year=2012, month=1, day=5),
datetime(year=2002, month=12, day=5),
datetime(year=2012, month=1, day=5),
datetime(year=2012, month=1, day=6),
],
}
)
current_data = pd.DataFrame(
{
"my_target": [1],
"my_prediction": [0],
"numerical_feature": [5],
"categorical_feature": ["y"],
"datetime_feature": [datetime(year=2012, month=1, day=5)],
}
)
data_mapping = ColumnMapping(
target="my_target",
prediction="my_prediction",
numerical_features=["numerical_feature"],
categorical_features=["categorical_feature"],
datetime_features=["datetime_feature"],
task="regression",
)
evidently_monitoring = ModelMonitoring(monitors=[DataQualityMonitor()], options=None)
evidently_monitoring.execute(reference_data=reference_data, current_data=current_data, column_mapping=data_mapping)
result = collect_metrics_results(evidently_monitoring.metrics())
assert "data_quality:quality_stat" in result
assert result["data_quality:quality_stat"] == [
{"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "count"}, "value": 4},
{
"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "infinite_count"},
"value": 0,
},
{
"labels": {
"dataset": "reference",
"feature": "num",
"feature_type": "num",
"metric": "infinite_percentage",
},
"value": 0.0,
},
{
"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "missing_count"},
"value": 0,
},
{
"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "missing_percentage"},
"value": 0.0,
},
{
"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "unique_count"},
"value": 3,
},
{
"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "unique_percentage"},
"value": 75.0,
},
{
"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "percentile_25"},
"value": 1.0,
},
{
"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "percentile_50"},
"value": 1.5,
},
{
"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "percentile_75"},
"value": 2.5,
},
{"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "max"}, "value": 4},
{"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "min"}, "value": 1},
{"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "mean"}, "value": 2.0},
{
"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "most_common_value"},
"value": 1,
},
{
"labels": {
"dataset": "reference",
"feature": "num",
"feature_type": "num",
"metric": "most_common_value_percentage",
},
"value": 50.0,
},
{"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "std"}, "value": 1.41},
{
"labels": {"dataset": "reference", "feature": "datetime", "feature_type": "datetime", "metric": "count"},
"value": 4,
},
{
"labels": {
"dataset": "reference",
"feature": "datetime",
"feature_type": "datetime",
"metric": "missing_count",
},
"value": 0,
},
{
"labels": {
"dataset": "reference",
"feature": "datetime",
"feature_type": "datetime",
"metric": "missing_percentage",
},
"value": 0.0,
},
{
"labels": {
"dataset": "reference",
"feature": "datetime",
"feature_type": "datetime",
"metric": "unique_count",
},
"value": 3,
},
{
"labels": {
"dataset": "reference",
"feature": "datetime",
"feature_type": "datetime",
"metric": "unique_percentage",
},
"value": 75.0,
},
{
"labels": {"dataset": "reference", "feature": "datetime", "feature_type": "datetime", "metric": "max"},
"value": "2012-01-06 00:00:00",
},
{
"labels": {"dataset": "reference", "feature": "datetime", "feature_type": "datetime", "metric": "min"},
"value": "2002-12-05 00:00:00",
},
{
"labels": {
"dataset": "reference",
"feature": "datetime",
"feature_type": "datetime",
"metric": "most_common_value",
},
"value": "2012-01-05 00:00:00",
},
{
"labels": {
"dataset": "reference",
"feature": "datetime",
"feature_type": "datetime",
"metric": "most_common_value_percentage",
},
"value": 50.0,
},
{"labels": {"dataset": "reference", "feature": "cat", "feature_type": "cat", "metric": "count"}, "value": 4},
{
"labels": {"dataset": "reference", "feature": "cat", "feature_type": "cat", "metric": "missing_count"},
"value": 0,
},
{
"labels": {"dataset": "reference", "feature": "cat", "feature_type": "cat", "metric": "missing_percentage"},
"value": 0.0,
},
{
"labels": {"dataset": "reference", "feature": "cat", "feature_type": "cat", "metric": "unique_count"},
"value": 3,
},
{
"labels": {"dataset": "reference", "feature": "cat", "feature_type": "cat", "metric": "unique_percentage"},
"value": 75.0,
},
{
"labels": {"dataset": "reference", "feature": "cat", "feature_type": "cat", "metric": "most_common_value"},
"value": "y",
},
{
"labels": {
"dataset": "reference",
"feature": "cat",
"feature_type": "cat",
"metric": "most_common_value_percentage",
},
"value": 50.0,
},
{"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "count"}, "value": 4},
{
"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "infinite_count"},
"value": 0,
},
{
"labels": {
"dataset": "reference",
"feature": "num",
"feature_type": "num",
"metric": "infinite_percentage",
},
"value": 0.0,
},
{
"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "missing_count"},
"value": 0,
},
{
"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "missing_percentage"},
"value": 0.0,
},
{
"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "unique_count"},
"value": 4,
},
{
"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "unique_percentage"},
"value": 100.0,
},
{
"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "percentile_25"},
"value": -0.25,
},
{
"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "percentile_50"},
"value": 1.0,
},
{
"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "percentile_75"},
"value": 2.75,
},
{"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "max"}, "value": 5},
{"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "min"}, "value": -1},
{"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "mean"}, "value": 1.5},
{
"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "most_common_value"},
"value": 5,
},
{
"labels": {
"dataset": "reference",
"feature": "num",
"feature_type": "num",
"metric": "most_common_value_percentage",
},
"value": 25.0,
},
{"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "std"}, "value": 2.65},
{"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "count"}, "value": 1},
{
"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "infinite_count"},
"value": 0,
},
{
"labels": {
"dataset": "reference",
"feature": "num",
"feature_type": "num",
"metric": "infinite_percentage",
},
"value": 0.0,
},
{
"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "missing_count"},
"value": 0,
},
{
"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "missing_percentage"},
"value": 0.0,
},
{
"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "unique_count"},
"value": 1,
},
{
"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "unique_percentage"},
"value": 100.0,
},
{
"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "percentile_25"},
"value": 1.0,
},
{
"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "percentile_50"},
"value": 1.0,
},
{
"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "percentile_75"},
"value": 1.0,
},
{"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "max"}, "value": 1},
{"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "min"}, "value": 1},
{"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "mean"}, "value": 1.0},
{
"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "most_common_value"},
"value": 1,
},
{
"labels": {
"dataset": "reference",
"feature": "num",
"feature_type": "num",
"metric": "most_common_value_percentage",
},
"value": 100.0,
},
{"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "std"}, "value": None},
{
"labels": {"dataset": "reference", "feature": "datetime", "feature_type": "datetime", "metric": "count"},
"value": 1,
},
{
"labels": {
"dataset": "reference",
"feature": "datetime",
"feature_type": "datetime",
"metric": "missing_count",
},
"value": 0,
},
{
"labels": {
"dataset": "reference",
"feature": "datetime",
"feature_type": "datetime",
"metric": "missing_percentage",
},
"value": 0.0,
},
{
"labels": {
"dataset": "reference",
"feature": "datetime",
"feature_type": "datetime",
"metric": "unique_count",
},
"value": 1,
},
{
"labels": {
"dataset": "reference",
"feature": "datetime",
"feature_type": "datetime",
"metric": "unique_percentage",
},
"value": 100.0,
},
{
"labels": {"dataset": "reference", "feature": "datetime", "feature_type": "datetime", "metric": "max"},
"value": "2012-01-05 00:00:00",
},
{
"labels": {"dataset": "reference", "feature": "datetime", "feature_type": "datetime", "metric": "min"},
"value": "2012-01-05 00:00:00",
},
{
"labels": {
"dataset": "reference",
"feature": "datetime",
"feature_type": "datetime",
"metric": "most_common_value",
},
"value": "2012-01-05 00:00:00",
},
{
"labels": {
"dataset": "reference",
"feature": "datetime",
"feature_type": "datetime",
"metric": "most_common_value_percentage",
},
"value": 100.0,
},
{"labels": {"dataset": "reference", "feature": "cat", "feature_type": "cat", "metric": "count"}, "value": 1},
{
"labels": {"dataset": "reference", "feature": "cat", "feature_type": "cat", "metric": "missing_count"},
"value": 0,
},
{
"labels": {"dataset": "reference", "feature": "cat", "feature_type": "cat", "metric": "missing_percentage"},
"value": 0.0,
},
{
"labels": {"dataset": "reference", "feature": "cat", "feature_type": "cat", "metric": "unique_count"},
"value": 1,
},
{
"labels": {"dataset": "reference", "feature": "cat", "feature_type": "cat", "metric": "unique_percentage"},
"value": 100.0,
},
{
"labels": {"dataset": "reference", "feature": "cat", "feature_type": "cat", "metric": "most_common_value"},
"value": "y",
},
{
"labels": {
"dataset": "reference",
"feature": "cat",
"feature_type": "cat",
"metric": "most_common_value_percentage",
},
"value": 100.0,
},
{
"labels": {
"dataset": "reference",
"feature": "cat",
"feature_type": "cat",
"metric": "new_in_current_values_count",
},
"value": 0,
},
{
"labels": {
"dataset": "reference",
"feature": "cat",
"feature_type": "cat",
"metric": "unused_in_current_values_count",
},
"value": 2,
},
{"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "count"}, "value": 1},
{
"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "infinite_count"},
"value": 0,
},
{
"labels": {
"dataset": "reference",
"feature": "num",
"feature_type": "num",
"metric": "infinite_percentage",
},
"value": 0.0,
},
{
"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "missing_count"},
"value": 0,
},
{
"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "missing_percentage"},
"value": 0.0,
},
{
"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "unique_count"},
"value": 1,
},
{
"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "unique_percentage"},
"value": 100.0,
},
{
"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "percentile_25"},
"value": 5.0,
},
{
"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "percentile_50"},
"value": 5.0,
},
{
"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "percentile_75"},
"value": 5.0,
},
{"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "max"}, "value": 5},
{"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "min"}, "value": 5},
{"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "mean"}, "value": 5.0},
{
"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "most_common_value"},
"value": 5,
},
{
"labels": {
"dataset": "reference",
"feature": "num",
"feature_type": "num",
"metric": "most_common_value_percentage",
},
"value": 100.0,
},
{"labels": {"dataset": "reference", "feature": "num", "feature_type": "num", "metric": "std"}, "value": None},
]
```

| Field | Value |
|---|---|
| avg_line_length | 38.528 |
| max_line_length | 120 |
| alphanum_fraction | 0.448245 |

| Signal | _quality_signal value | unsuffixed value |
|---|---|---|
| qsc_code_num_words | 1,556 | 0 |
| qsc_code_num_chars | 19,264 | 0 |
| qsc_code_mean_word_length | 5.38946 | 0 |
| qsc_code_frac_words_unique | 0.060411 | null |
| qsc_code_frac_chars_top_2grams | 0.15192 | 0 |
| qsc_code_frac_chars_top_3grams | 0.257095 | 1 |
| qsc_code_frac_chars_top_4grams | 0.338898 | 1 |
| qsc_code_frac_chars_dupe_5grams | 0.87968 | 1 |
| qsc_code_frac_chars_dupe_6grams | 0.874911 | 1 |
| qsc_code_frac_chars_dupe_7grams | 0.869187 | 1 |
| qsc_code_frac_chars_dupe_8grams | 0.869187 | 1 |
| qsc_code_frac_chars_dupe_9grams | 0.866921 | 1 |
| qsc_code_frac_chars_dupe_10grams | 0.850823 | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.025395 | 0 |
| qsc_code_frac_chars_whitespace | 0.352004 | 0 |
| qsc_code_size_file_byte | 19,264 | 0 |
| qsc_code_num_lines | 499 | 0 |
| qsc_code_num_chars_line_max | 121 | 0 |
| qsc_code_num_chars_line_mean | 38.60521 | 0 |
| qsc_code_frac_chars_alphabet | 0.646399 | 0 |
| qsc_code_frac_chars_comments | 0 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0.50813 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0.392857 | 0 |
| qsc_code_frac_chars_long_word_length | 0.017182 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0.006098 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0.004065 | 0 |
| qsc_codepython_cate_var_zero | false | 0 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 0.012195 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0.01626 | 0 |
| qsc_codepython_frac_lines_print | 0 | 0 |

| Field | Value |
|---|---|
| effective | 0 |
| hits | 8 |
Row 5

| Field | Value |
|---|---|
| hexsha | 813da4d9faafa0ae6c29283876ca69e37a36f68e |
| size | 484 |
| ext | py |
| lang | Python |
| max_stars_repo_path | file_vi_keyboard_shortcuts.py |
| max_stars_repo_name | Annapooraniqxf2/keyboard-shortcuts |
| max_stars_repo_head_hexsha | 683a144ed276ba6fad28c14690905738f132b414 |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | file_vi_keyboard_shortcuts.py |
| max_issues_repo_name | Annapooraniqxf2/keyboard-shortcuts |
| max_issues_repo_head_hexsha | 683a144ed276ba6fad28c14690905738f132b414 |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | 2 |
| max_issues_repo_issues_event_min_datetime | 2021-04-06T18:22:03.000Z |
| max_issues_repo_issues_event_max_datetime | 2021-06-02T03:35:31.000Z |
| max_forks_repo_path | file_vi_keyboard_shortcuts.py |
| max_forks_repo_name | Annapooraniqxf2/keyboard-shortcuts |
| max_forks_repo_head_hexsha | 683a144ed276ba6fad28c14690905738f132b414 |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | 1 |
| max_forks_repo_forks_event_min_datetime | 2020-07-21T08:31:37.000Z |
| max_forks_repo_forks_event_max_datetime | 2020-07-21T08:31:37.000Z |

content:

```python
"""
File for keyboard exercise
"""
print("Hello, this is you first exercise!")
print("You can search through this file")
print("You can search through this file")
print("you can delete this line")
print("You can search through this file")
print("You can search through this file")
print("You can search through this file")
print("You can search through this file")
print("You can search through this file")
print("You can search through this file")
print("continue--saving the file")
```

| Field | Value |
|---|---|
| avg_line_length | 32.266667 |
| max_line_length | 43 |
| alphanum_fraction | 0.747934 |

| Signal | _quality_signal value | unsuffixed value |
|---|---|---|
| qsc_code_num_words | 78 | 0 |
| qsc_code_num_chars | 484 | 0 |
| qsc_code_mean_word_length | 4.641026 | 0 |
| qsc_code_frac_words_unique | 0.230769 | null |
| qsc_code_frac_chars_top_2grams | 0.198895 | 0 |
| qsc_code_frac_chars_top_3grams | 0.273481 | 1 |
| qsc_code_frac_chars_top_4grams | 0.375691 | 1 |
| qsc_code_frac_chars_dupe_5grams | 0.751381 | 0 |
| qsc_code_frac_chars_dupe_6grams | 0.751381 | 1 |
| qsc_code_frac_chars_dupe_7grams | 0.751381 | 1 |
| qsc_code_frac_chars_dupe_8grams | 0.751381 | 1 |
| qsc_code_frac_chars_dupe_9grams | 0.751381 | 1 |
| qsc_code_frac_chars_dupe_10grams | 0.751381 | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0 | 0 |
| qsc_code_frac_chars_whitespace | 0.140496 | 0 |
| qsc_code_size_file_byte | 484 | 0 |
| qsc_code_num_lines | 14 | 0 |
| qsc_code_num_chars_line_max | 44 | 0 |
| qsc_code_num_chars_line_mean | 34.571429 | 0 |
| qsc_code_frac_chars_alphabet | 0.870192 | 0 |
| qsc_code_frac_chars_comments | 0.053719 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0.727273 | 1 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0.753333 | 1 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0 | 0 |
| qsc_codepython_cate_var_zero | true | 1 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 0 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0 | 0 |
| qsc_codepython_frac_lines_print | 1 | 1 |

| Field | Value |
|---|---|
| effective | 0 |
| hits | 11 |
Row 6

| Field | Value |
|---|---|
| hexsha | 8140a8ff23295811d9338ae4eea9732c0164d45c |
| size | 2,742 |
| ext | py |
| lang | Python |
| max_stars_repo_path | sovrin/cli/helper.py |
| max_stars_repo_name | sovrin-foundation/old-sovrin |
| max_stars_repo_head_hexsha | d4e705054b7252c62fea00114060035c6eb314a4 |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | 3 |
| max_stars_repo_stars_event_min_datetime | 2017-07-19T14:26:31.000Z |
| max_stars_repo_stars_event_max_datetime | 2020-05-16T16:09:37.000Z |
| max_issues_repo_path | sovrin/cli/helper.py |
| max_issues_repo_name | sovrin-foundation/old-sovrin |
| max_issues_repo_head_hexsha | d4e705054b7252c62fea00114060035c6eb314a4 |
| max_issues_repo_licenses | ["Apache-2.0"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | sovrin/cli/helper.py |
| max_forks_repo_name | sovrin-foundation/old-sovrin |
| max_forks_repo_head_hexsha | d4e705054b7252c62fea00114060035c6eb314a4 |
| max_forks_repo_licenses | ["Apache-2.0"] |
| max_forks_count | 3 |
| max_forks_repo_forks_event_min_datetime | 2017-10-28T08:19:00.000Z |
| max_forks_repo_forks_event_max_datetime | 2021-06-06T10:48:55.000Z |

content:

```python
from sovrin.cli.constants import \
    CLIENT_GRAMS_CLIENT_WITH_IDENTIFIER_FORMATTED_REG_EX, \
    CLIENT_GRAMS_CLIENT_ADD_FORMATTED_REG_EX, SEND_NYM_FORMATTED_REG_EX, \
    GET_NYM_FORMATTED_REG_EX, \
    ADD_ATTRIB_FORMATTED_REG_EX, SEND_CLAIM_DEF_FORMATTED_REG_EX, \
    REQ_CRED_FORMATTED_REG_EX, LIST_CREDS_FORMATTED_REG_EX, \
    GEN_CRED_FORMATTED_REG_EX, ADD_GENESIS_FORMATTED_REG_EX, \
    INIT_ATTR_REPO_FORMATTED_REG_EX, ADD_ATTRS_FORMATTED_REG_EX, \
    STORE_CRED_FORMATTED_REG_EX, \
    GEN_VERIF_NONCE_FORMATTED_REG_EX, PREP_PROOF_FORMATTED_REG_EX, \
    VERIFY_PROOF_FORMATTED_REG_EX, \
    ADD_ATTRS_PROVER_FORMATTED_REG_EX, CONNECT_FORMATTED_REG_EX, \
    SHOW_FILE_FORMATTED_REG_EX, LOAD_FILE_FORMATTED_REG_EX, \
    SHOW_LINK_FORMATTED_REG_EX, SYNC_LINK_FORMATTED_REG_EX, \
    ACCEPT_LINK_FORMATTED_REG_EX, SHOW_CLAIM_FORMATTED_REG_EX, \
    REQUEST_CLAIM_FORMATTED_REG_EX, SHOW_CLAIM_REQ_FORMATTED_REG_EX, \
    SET_ATTRIBUTE_FORMATTED_REG_EX, \
    SEND_ISSUER_KEY_FORMATTED_REG_EX, SEND_CLAIM_FORMATTED_REG_EX, \
    PING_TARGET_FORMATTED_REG_EX, SEND_NODE_FORMATTED_REG_EX, \
    SEND_POOL_UPG_FORMATTED_REG_EX
def getNewClientGrams():
    # TODO: Why do we have to manually pipe each regex except the last
    # one? Fix this
    return [
        ADD_GENESIS_FORMATTED_REG_EX,
        # Regex for `new client steward with identifier <nym>`
        CLIENT_GRAMS_CLIENT_WITH_IDENTIFIER_FORMATTED_REG_EX,
        # Regex for `client steward add sponsor bob` or `client steward
        # add user bob`
        CLIENT_GRAMS_CLIENT_ADD_FORMATTED_REG_EX,
        SEND_NYM_FORMATTED_REG_EX,
        GET_NYM_FORMATTED_REG_EX,
        ADD_ATTRIB_FORMATTED_REG_EX,
        SEND_CLAIM_DEF_FORMATTED_REG_EX,
        SEND_ISSUER_KEY_FORMATTED_REG_EX,
        REQ_CRED_FORMATTED_REG_EX,
        LIST_CREDS_FORMATTED_REG_EX,
        GEN_CRED_FORMATTED_REG_EX,
        STORE_CRED_FORMATTED_REG_EX,
        GEN_VERIF_NONCE_FORMATTED_REG_EX,
        PREP_PROOF_FORMATTED_REG_EX,
        VERIFY_PROOF_FORMATTED_REG_EX,
        INIT_ATTR_REPO_FORMATTED_REG_EX,
        ADD_ATTRS_FORMATTED_REG_EX,
        SHOW_LINK_FORMATTED_REG_EX,
        SHOW_FILE_FORMATTED_REG_EX,
        LOAD_FILE_FORMATTED_REG_EX,
        ADD_ATTRS_PROVER_FORMATTED_REG_EX,
        CONNECT_FORMATTED_REG_EX,
        SYNC_LINK_FORMATTED_REG_EX,
        ACCEPT_LINK_FORMATTED_REG_EX,
        SHOW_CLAIM_REQ_FORMATTED_REG_EX,
        SHOW_CLAIM_FORMATTED_REG_EX,
        REQUEST_CLAIM_FORMATTED_REG_EX,
        SET_ATTRIBUTE_FORMATTED_REG_EX,
        PING_TARGET_FORMATTED_REG_EX,
        SEND_CLAIM_FORMATTED_REG_EX,
        SEND_NODE_FORMATTED_REG_EX,
        SEND_POOL_UPG_FORMATTED_REG_EX
    ]
NEXT_COMMANDS_TO_TRY_TEXT = "Try Next:"
USAGE_TEXT = "Usage:"
```

| Field | Value |
|---|---|
| avg_line_length | 40.323529 |
| max_line_length | 74 |
| alphanum_fraction | 0.77097 |

| Signal | _quality_signal value | unsuffixed value |
|---|---|---|
| qsc_code_num_words | 397 | 0 |
| qsc_code_num_chars | 2,742 | 0 |
| qsc_code_mean_word_length | 4.612091 | 0 |
| qsc_code_frac_words_unique | 0.209068 | null |
| qsc_code_frac_chars_top_2grams | 0.419443 | 1 |
| qsc_code_frac_chars_top_3grams | 0.48935 | 1 |
| qsc_code_frac_chars_top_4grams | 0.117968 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0.868378 | 1 |
| qsc_code_frac_chars_dupe_6grams | 0.84107 | 1 |
| qsc_code_frac_chars_dupe_7grams | 0.84107 | 1 |
| qsc_code_frac_chars_dupe_8grams | 0.84107 | 1 |
| qsc_code_frac_chars_dupe_9grams | 0.762425 | 1 |
| qsc_code_frac_chars_dupe_10grams | 0.681049 | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0 | 0 |
| qsc_code_frac_chars_whitespace | 0.188549 | 0 |
| qsc_code_size_file_byte | 2,742 | 0 |
| qsc_code_num_lines | 67 | 0 |
| qsc_code_num_chars_line_max | 75 | 0 |
| qsc_code_num_chars_line_mean | 40.925373 | 0 |
| qsc_code_frac_chars_alphabet | 0.822921 | 0 |
| qsc_code_frac_chars_comments | 0.075492 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0.035088 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0.005934 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0.014925 | 1 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0.017544 | 0 |
| qsc_codepython_cate_var_zero | false | 0 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 0.017544 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0.017544 | 0 |
| qsc_codepython_score_lines_no_logic | 0.052632 | 0 |
| qsc_codepython_frac_lines_print | 0 | 0 |

| Field | Value |
|---|---|
| effective | 0 |
| hits | 9 |
Row 7

| Field | Value |
|---|---|
| hexsha | d4a6b973894166a6e36e9bf35f761d51c42e59e3 |
| size | 118 |
| ext | py |
| lang | Python |
| max_stars_repo_path | sequestrum/errors.py |
| max_stars_repo_name | goteemLight/sequestrum |
| max_stars_repo_head_hexsha | 32fea21e45101f11f9f45f730314b6d98f70b508 |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | sequestrum/errors.py |
| max_issues_repo_name | goteemLight/sequestrum |
| max_issues_repo_head_hexsha | 32fea21e45101f11f9f45f730314b6d98f70b508 |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | sequestrum/errors.py |
| max_forks_repo_name | goteemLight/sequestrum |
| max_forks_repo_head_hexsha | 32fea21e45101f11f9f45f730314b6d98f70b508 |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:

```python
# Error Module
def format_error(error_type, error_message):
    return("[{}] {}".format(error_type, error_message))
```

| Field | Value |
|---|---|
| avg_line_length | 19.666667 |
| max_line_length | 55 |
| alphanum_fraction | 0.711864 |

| Signal | _quality_signal value | unsuffixed value |
|---|---|---|
| qsc_code_num_words | 15 | 1 |
| qsc_code_num_chars | 118 | 0 |
| qsc_code_mean_word_length | 5.266667 | 0 |
| qsc_code_frac_words_unique | 0.466667 | null |
| qsc_code_frac_chars_top_2grams | 0.278481 | 1 |
| qsc_code_frac_chars_top_3grams | 0.35443 | 1 |
| qsc_code_frac_chars_top_4grams | 0.531646 | 1 |
| qsc_code_frac_chars_dupe_5grams | 0 | 0 |
| qsc_code_frac_chars_dupe_6grams | 0 | 0 |
| qsc_code_frac_chars_dupe_7grams | 0 | 0 |
| qsc_code_frac_chars_dupe_8grams | 0 | 0 |
| qsc_code_frac_chars_dupe_9grams | 0 | 0 |
| qsc_code_frac_chars_dupe_10grams | 0 | 0 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0 | 0 |
| qsc_code_frac_chars_whitespace | 0.127119 | 0 |
| qsc_code_size_file_byte | 118 | 0 |
| qsc_code_num_lines | 5 | 1 |
| qsc_code_num_chars_line_max | 56 | 0 |
| qsc_code_num_chars_line_mean | 23.6 | 0 |
| qsc_code_frac_chars_alphabet | 0.76699 | 0 |
| qsc_code_frac_chars_comments | 0.101695 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0.067308 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0.5 | 1 |
| qsc_codepython_cate_var_zero | false | 0 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 0 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0.5 | 1 |
| qsc_codepython_score_lines_no_logic | 0.5 | 0 |
| qsc_codepython_frac_lines_print | 0 | 0 |

| Field | Value |
|---|---|
| effective | 0 |
| hits | 7 |
Row 8

| Field | Value |
|---|---|
| hexsha | d4e2217e9a89efab839284d7cff39e8bc7bc8cda |
| size | 35,495 |
| ext | py |
| lang | Python |
| max_stars_repo_path | pytests/eventing/eventing_sanity.py |
| max_stars_repo_name | cgghali/TAF |
| max_stars_repo_head_hexsha | 1de8dec77ad781c373e18d9c285befd534ac203a |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | pytests/eventing/eventing_sanity.py |
| max_issues_repo_name | cgghali/TAF |
| max_issues_repo_head_hexsha | 1de8dec77ad781c373e18d9c285befd534ac203a |
| max_issues_repo_licenses | ["Apache-2.0"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | pytests/eventing/eventing_sanity.py |
| max_forks_repo_name | cgghali/TAF |
| max_forks_repo_head_hexsha | 1de8dec77ad781c373e18d9c285befd534ac203a |
| max_forks_repo_licenses | ["Apache-2.0"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:

```python
from eventing.eventing_constants import HANDLER_CODE
from eventing.eventing_base import EventingBaseTest
from membase.helper.cluster_helper import ClusterOperationHelper
from BucketLib.bucket import Bucket
from cb_tools.cbstats import Cbstats
from couchbase_helper.documentgenerator import doc_generator
from couchbase_helper.durability_helper import DurabilityHelper
from membase.api.rest_client import RestConnection
from remote.remote_util import RemoteMachineShellConnection
class EventingSanity(EventingBaseTest):
def setUp(self):
super(EventingSanity, self).setUp()
self.rest.set_service_memoryQuota(service='memoryQuota', memoryQuota=700)
if self.create_functions_buckets:
self.bucket_size = 200
self.log.info(self.bucket_size)
bucket_params_src = Bucket({"name": self.src_bucket_name, "replicaNumber": self.num_replicas})
src_bucket = self.bucket_util.create_bucket(bucket_params_src)
self.src_bucket = self.bucket_util.get_all_buckets(self.cluster.master)[0]
bucket_params_dst = Bucket({"name": self.dst_bucket_name, "replicaNumber": self.num_replicas})
bucket_params_meta = Bucket({"name": self.metadata_bucket_name, "replicaNumber": self.num_replicas})
bucket_dst = self.bucket_util.create_bucket(bucket_params_dst)
bucket_meta = self.bucket_util.create_bucket(bucket_params_meta)
self.buckets = self.bucket_util.get_all_buckets(self.cluster.master)
self.gens_load = self.generate_docs(self.docs_per_day)
self.expiry = 3
def tearDown(self):
super(EventingSanity, self).tearDown()
def test_create_mutation_for_dcp_stream_boundary_from_beginning(self):
self.load(load_gen=self.gens_load, bucket=self.src_bucket)
body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OPS_ON_UPDATE, worker_count=3)
self.deploy_function(body)
# Wait for eventing to catch up with all the create mutations and verify results
self.verify_eventing_results(self.function_name, self.docs_per_day * 2016)
self.undeploy_and_delete_function(body)
def test_delete_mutation_for_dcp_stream_boundary_from_beginning(self):
self.load(load_gen=self.gens_load, bucket=self.src_bucket)
body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OPS_ON_DELETE, worker_count=3)
self.deploy_function(body)
# delete all documents
self.load(load_gen=self.gens_load, bucket=self.src_bucket, operation="delete")
# Wait for eventing to catch up with all the delete mutations and verify results
self.verify_eventing_results(self.function_name, self.docs_per_day * 2016, on_delete=True)
self.undeploy_and_delete_function(body)
def test_expiry_mutation_for_dcp_stream_boundary_from_beginning(self):
self.load(load_gen=self.gens_load, bucket=self.src_bucket)
# set expiry pager interval
ClusterOperationHelper.flushctl_set(self.master, "exp_pager_stime", 1, bucket=self.src_bucket_name)
body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OPS_ON_DELETE, worker_count=3)
self.deploy_function(body)
# Wait for eventing to catch up with all the expiry mutations and verify results
self.verify_eventing_results(self.function_name, self.docs_per_day * 2016, on_delete=True)
self.undeploy_and_delete_function(body)
def test_update_mutation_for_dcp_stream_boundary_from_now(self):
self.load(load_gen=self.gens_load, bucket=self.src_bucket)
body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OPS_ON_UPDATE,
dcp_stream_boundary="from_now", sock_batch_size=1, worker_count=4,
cpp_worker_thread_count=4)
self.deploy_function(body)
# update all documents
self.load(load_gen=self.gens_load, bucket=self.src_bucket, operation="update")
# Wait for eventing to catch up with all the update mutations and verify results
self.verify_eventing_results(self.function_name, self.docs_per_day * 2016)
self.undeploy_and_delete_function(body)
def test_n1ql_query_execution_from_handler_code(self):
self.load(load_gen=self.gens_load, bucket=self.src_bucket)
body = self.create_save_function_body(self.function_name, HANDLER_CODE.N1QL_INSERT_ON_UPDATE, worker_count=3)
# Enable this after MB-26527 is fixed
# sock_batch_size=10, worker_count=4, cpp_worker_thread_count=4)
self.deploy_function(body)
# Wait for eventing to catch up with all the update mutations and verify results
self.verify_eventing_results(self.function_name, self.docs_per_day * 2016)
self.undeploy_and_delete_function(body)
def test_doc_timer_events_from_handler_code_with_n1ql(self):
self.load(load_gen=self.gens_load, bucket=self.src_bucket)
body = self.create_save_function_body(self.function_name, HANDLER_CODE.N1QL_INSERT_ON_UPDATE_WITH_DOC_TIMER,
worker_count=3)
self.deploy_function(body)
# Wait for eventing to catch up with all the update mutations and verify results
self.verify_eventing_results(self.function_name, self.docs_per_day * 2016, doc_timer_events=True)
self.undeploy_and_delete_function(body)
def test_cron_timer_events_from_handler_code_with_n1ql(self):
self.load(load_gen=self.gens_load, bucket=self.src_bucket)
body = self.create_save_function_body(self.function_name, HANDLER_CODE.N1QL_INSERT_ON_UPDATE_WITH_CRON_TIMER,
worker_count=3)
self.deploy_function(body)
# Wait for eventing to catch up with all the update mutations and verify results
self.verify_eventing_results(self.function_name, self.docs_per_day * 2016, doc_timer_events=True)
self.undeploy_and_delete_function(body)
def test_doc_timer_events_from_handler_code_with_bucket_ops(self):
self.load(load_gen=self.gens_load, bucket=self.src_bucket)
body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OPS_WITH_DOC_TIMER,
worker_count=3)
self.deploy_function(body)
# Wait for eventing to catch up with all the update mutations and verify results
self.verify_eventing_results(self.function_name, self.docs_per_day * 2016, doc_timer_events=True)
self.undeploy_and_delete_function(body)
def test_cron_timer_events_from_handler_code_with_bucket_ops(self):
self.load(load_gen=self.gens_load, bucket=self.src_bucket)
body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OPS_WITH_CRON_TIMER,
worker_count=3)
self.deploy_function(body)
# Wait for eventing to catch up with all the update mutations and verify results
self.verify_eventing_results(self.function_name, self.docs_per_day * 2016, doc_timer_events=True)
self.undeploy_and_delete_function(body)
def test_delete_bucket_operation_from_handler_code(self):
self.load(load_gen=self.gens_load, bucket=self.src_bucket)
body = self.create_save_function_body(self.function_name, HANDLER_CODE.DELETE_BUCKET_OP_ON_DELETE)
self.deploy_function(body)
# Wait for eventing to catch up with all the update mutations and verify results
self.verify_eventing_results(self.function_name, self.docs_per_day * 2016)
# delete all documents
self.load(load_gen=self.gens_load, bucket=self.src_bucket, operation="delete")
# Wait for eventing to catch up with all the delete mutations and verify results
self.verify_eventing_results(self.function_name, 0, skip_stats_validation=True)
self.undeploy_and_delete_function(body)
def test_timers_without_context(self):
self.load(load_gen=self.gens_load, bucket=self.src_bucket)
body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OPS_WITH_TIMER_WITHOUT_CONTEXT,
worker_count=3)
self.deploy_function(body)
# Wait for eventing to catch up with all the update mutations and verify results
self.verify_eventing_results(self.function_name, self.docs_per_day * 2016, skip_stats_validation=True)
self.undeploy_and_delete_function(body)
def test_cancel_timers_with_timers_being_overwritten(self):
self.load(load_gen=self.gens_load, bucket=self.src_bucket)
body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OP_WITH_TIMER_OVERWRITTEN,
worker_count=3)
self.deploy_function(body)
# Wait for eventing to catch up with all the update mutations and verify results
self.verify_eventing_results(self.function_name, self.docs_per_day * 2016, skip_stats_validation=True)
self.undeploy_and_delete_function(body)
def test_source_doc_mutations(self):
self.load(load_gen=self.gens_load, bucket=self.src_bucket)
body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OP_SOURCE_DOC_MUTATION,
worker_count=3)
self.deploy_function(body)
# Wait for eventing to catch up with all the update mutations and verify results
#self.verify_eventing_results(self.function_name, self.docs_per_day * 2016, skip_stats_validation=True)
#self.verify_source_bucket_mutation(self.docs_per_day * 2016)
# self.verify_source_bucket_mutation(self.docs_per_day * 2016)
# delete all documents
self.load(load_gen=self.gens_load, bucket=self.src_bucket, operation="delete")
# self.verify_source_bucket_mutation(self.docs_per_day * 2016,deletes=True,timeout=1200)
self.undeploy_and_delete_function(body)
def test_source_doc_mutations_with_timers(self):
self.load(load_gen=self.gens_load, bucket=self.src_bucket)
body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OP_SOURCE_DOC_MUTATION_WITH_TIMERS,
worker_count=3)
self.deploy_function(body)
# Wait for eventing to catch up with all the update mutations and verify results
#self.verify_eventing_results(self.function_name, self.docs_per_day * 2016, skip_stats_validation=True)
#self.verify_source_bucket_mutation(self.docs_per_day * 2016)
# self.verify_source_bucket_mutation(self.docs_per_day * 2016)
# delete all documents
self.load(load_gen=self.gens_load, bucket=self.src_bucket, operation="delete")
# self.verify_source_bucket_mutation(self.docs_per_day * 2016,deletes=True,timeout=1200)
self.undeploy_and_delete_function(body)
def test_source_bucket_mutations(self):
self.load(load_gen=self.gens_load, bucket=self.src_bucket)
body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OP_WITH_SOURCE_BUCKET_MUTATION,
worker_count=3)
self.deploy_function(body)
# Wait for eventing to catch up with all the update mutations and verify results
#self.verify_eventing_results(self.function_name, self.docs_per_day * 2016, skip_stats_validation=True)
#self.verify_source_bucket_mutation(self.docs_per_day * 2016)
self.verify_eventing_results(self.function_name, self.docs_per_day * 4032, skip_stats_validation=True)
# delete all documents
self.load(load_gen=self.gens_load, bucket=self.src_bucket, operation="delete")
self.verify_eventing_results(self.function_name, self.docs_per_day * 2016, skip_stats_validation=True)
self.undeploy_and_delete_function(body)
def test_source_bucket_mutations_with_timers(self):
self.load(load_gen=self.gens_load, bucket=self.src_bucket)
body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OP_SOURCE_BUCKET_MUTATION_WITH_TIMERS,
worker_count=3)
self.deploy_function(body)
# Wait for eventing to catch up with all the update mutations and verify results
#self.verify_eventing_results(self.function_name, self.docs_per_day * 2016, skip_stats_validation=True)
#self.verify_source_bucket_mutation(self.docs_per_day * 2016)
self.verify_eventing_results(self.function_name, self.docs_per_day * 4032, skip_stats_validation=True)
# delete all documents
self.load(load_gen=self.gens_load, bucket=self.src_bucket, operation="delete")
self.verify_eventing_results(self.function_name, self.docs_per_day * 2016, skip_stats_validation=True)
self.undeploy_and_delete_function(body)
def test_pause_resume_execution(self):
body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OPS_ON_UPDATE, worker_count=3)
self.deploy_function(body)
self.load(load_gen=self.gens_load, bucket=self.src_bucket)
self.pause_function(body)
# intentionally added , as it requires some time for eventing-consumers to shutdown
self.sleep(60)
self.assertTrue(self.check_if_eventing_consumers_are_cleaned_up(),
msg="eventing-consumer processes are not cleaned up even after undeploying the function")
self.gens_load = self.generate_docs(self.docs_per_day*2)
self.load(load_gen=self.gens_load, bucket=self.src_bucket)
self.resume_function(body)
# Wait for eventing to catch up with all the create mutations and verify results
self.verify_eventing_results(self.function_name, self.docs_per_day * 2016*2,skip_stats_validation=True)
self.undeploy_and_delete_function(body)
def test_source_bucket_mutation_for_dcp_stream_boundary_from_now(self):
self.load(load_gen=self.gens_load, bucket=self.src_bucket)
body = self.create_save_function_body(self.function_name,HANDLER_CODE.BUCKET_OP_WITH_SOURCE_BUCKET_MUTATION ,
dcp_stream_boundary="from_now", sock_batch_size=1, worker_count=4,
cpp_worker_thread_count=4)
self.deploy_function(body)
# update all documents
self.load(load_gen=self.gens_load, bucket=self.src_bucket, operation="update")
# Wait for eventing to catch up with all the update mutations and verify results
self.verify_eventing_results(self.function_name, self.docs_per_day * 2016*2)
self.undeploy_and_delete_function(body)
def test_compress_handler(self):
self.load(load_gen=self.gens_load, bucket=self.src_bucket)
body = self.create_save_function_body(self.function_name,"handler_code/compress.js")
self.deploy_function(body)
# Wait for eventing to catch up with all the update mutations and verify results
self.verify_eventing_results(self.function_name, self.docs_per_day * 2016)
self.load(load_gen=self.gens_load, bucket=self.src_bucket, operation="delete")
# Wait for eventing to catch up with all the delete mutations and verify results
self.verify_eventing_results(self.function_name, 0, skip_stats_validation=True)
self.undeploy_and_delete_function(body)
def test_eventing_with_aborts(self):
"""
1. Create index (2i/view) on default bucket
2. Load multiple docs such that all sync_writes will be aborted
3. Verify nothing went into indexing
4. Load sync_write docs such that they are successful
5. Validate the mutated docs are taken into indexing
:return:
"""
self.key = "test_query_doc"
self.sync_write_abort_pattern = self.input.param("sync_write_abort_pattern", "all_aborts")
self.create_eventing_during = self.input.param("create_eventing_during", "before_doc_ops")
crud_batch_size = 50
def_bucket = self.src_bucket
kv_nodes = self.cluster_util.get_kv_nodes()
replica_vbs = dict()
verification_dict = dict()
load_gen = dict()
load_gen["ADD"] = dict()
load_gen["SET"] = dict()
partial_aborts = ["initial_aborts", "aborts_at_end"]
durability_helper = DurabilityHelper(
self.log, len(self.cluster.nodes_in_cluster),
durability=self.durability_level,
replicate_to=self.replicate_to,
persist_to=self.persist_to)
if self.create_eventing_during == "before_doc_ops":
body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OPS_ON_UPDATE, worker_count=3)
self.deploy_function(body)
curr_items = self.bucket_util.get_bucket_current_item_count(
self.cluster, def_bucket)
if self.sync_write_abort_pattern in ["all_aborts", "initial_aborts"]:
self.bucket_util.flush_bucket(kv_nodes[0], def_bucket)
self.num_items = 0
else:
self.num_items = curr_items
self.log.info("Disabling auto_failover to avoid node failures")
status = RestConnection(self.cluster.master) \
.update_autofailover_settings(False, 120, False)
self.assertTrue(status, msg="Failure during disabling auto-failover")
# Validate vbucket stats
verification_dict["ops_create"] = self.num_items
verification_dict["ops_update"] = 0
verification_dict["rollback_item_count"] = 0
verification_dict["sync_write_aborted_count"] = 0
verification_dict["sync_write_committed_count"] = 0
self.log.info("Loading docs such that all sync_writes will be aborted")
for server in kv_nodes:
ssh_shell = RemoteMachineShellConnection(server)
cbstats = Cbstats(ssh_shell)
replica_vbs[server] = cbstats.vbucket_list(def_bucket.name,
"replica")
load_gen["ADD"][server] = list()
load_gen["ADD"][server].append(doc_generator(
self.key, 0, crud_batch_size,
target_vbucket=replica_vbs[server],
mutation_type="ADD"))
if self.sync_write_abort_pattern in partial_aborts:
load_gen["ADD"][server].append(doc_generator(
self.key, 10000, crud_batch_size,
target_vbucket=replica_vbs[server],
mutation_type="ADD"))
verification_dict["ops_create"] += crud_batch_size
verification_dict["sync_write_committed_count"] += \
crud_batch_size
task_success = self.bucket_util.load_durable_aborts(
ssh_shell, load_gen["ADD"][server], def_bucket,
self.durability_level,
"create", self.sync_write_abort_pattern)
if not task_success:
self.log_failure("Failure during load_abort task")
verification_dict["sync_write_aborted_count"] += \
crud_batch_size
if self.create_eventing_during == "before_doc_ops":
self.verify_eventing_results(self.function_name, verification_dict["ops_create"],
skip_stats_validation=True)
load_gen["SET"][server] = list()
load_gen["SET"][server].append(doc_generator(
self.key, 0, crud_batch_size,
target_vbucket=replica_vbs[server],
mutation_type="SET"))
if self.sync_write_abort_pattern in partial_aborts:
load_gen["SET"][server].append(doc_generator(
self.key, 10000, crud_batch_size,
target_vbucket=replica_vbs[server],
mutation_type="SET"))
verification_dict["ops_update"] += crud_batch_size
verification_dict["sync_write_committed_count"] += \
crud_batch_size
verification_dict["sync_write_aborted_count"] += \
crud_batch_size
task_success = self.bucket_util.load_durable_aborts(
ssh_shell, load_gen["SET"][server], def_bucket,
self.durability_level,
"update", self.sync_write_abort_pattern)
if not task_success:
self.log_failure("Failure during load_abort task")
ssh_shell.disconnect()
if self.create_eventing_during == "before_doc_ops":
self.verify_eventing_results(self.function_name, verification_dict["ops_create"],
skip_stats_validation=True)
failed = durability_helper.verify_vbucket_details_stats(
def_bucket, kv_nodes,
vbuckets=self.cluster_util.vbuckets, expected_val=verification_dict)
if failed:
self.log_failure("Cbstat vbucket-details verification failed")
self.validate_test_failure()
if self.create_eventing_during == "after_doc_ops":
body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OPS_ON_UPDATE, worker_count=3)
self.deploy_function(body)
self.verify_eventing_results(self.function_name, verification_dict["ops_create"],
skip_stats_validation=True)
self.log.info("Verify aborts are not consumed by eventing")
self.verify_eventing_results(self.function_name, verification_dict["ops_create"],
skip_stats_validation=True)
for server in kv_nodes:
if self.sync_write_abort_pattern == "initial_aborts":
load_gen["ADD"][server] = load_gen["ADD"][server][:1]
load_gen["SET"][server] = load_gen["SET"][server][:1]
elif self.sync_write_abort_pattern == "aborts_at_end":
load_gen["ADD"][server] = load_gen["ADD"][server][-1:]
load_gen["SET"][server] = load_gen["SET"][server][-1:]
self.log.info("Load sync_write docs such that they are successful")
for server in kv_nodes:
for gen_load in load_gen["ADD"][server]:
task = self.task.async_load_gen_docs(
self.cluster, def_bucket, gen_load, "create", 0,
batch_size=50, process_concurrency=8,
replicate_to=self.replicate_to, persist_to=self.persist_to,
durability=self.durability_level,
timeout_secs=self.sdk_timeout)
self.task.jython_task_manager.get_task_result(task)
verification_dict["ops_create"] += crud_batch_size
if len(task.fail.keys()) != 0:
self.log_failure("Some failures seen during doc_ops")
self.verify_eventing_results(self.function_name, verification_dict["ops_create"],
skip_stats_validation=True)
for gen_load in load_gen["SET"][server]:
task = self.task.async_load_gen_docs(
self.cluster, def_bucket, gen_load, "update", 0,
batch_size=50, process_concurrency=8,
replicate_to=self.replicate_to, persist_to=self.persist_to,
durability=self.durability_level,
timeout_secs=self.sdk_timeout)
self.task.jython_task_manager.get_task_result(task)
verification_dict["ops_update"] += crud_batch_size
if len(task.fail.keys()) != 0:
self.log_failure("Some failures seen during doc_ops")
self.verify_eventing_results(self.function_name, verification_dict["ops_update"],
skip_stats_validation=True)
self.log.info("Validate the mutated docs are taken into eventing")
self.verify_eventing_results(self.function_name, verification_dict["ops_create"],
skip_stats_validation=True)
self.validate_test_failure()
def test_fts_index_with_aborts(self):
"""
1. Create index (2i/view) on default bucket
2. Load multiple docs such that all sync_writes will be aborted
3. Verify nothing went into indexing
4. Load sync_write docs such that they are successful
5. Validate the mutated docs are taken into indexing
:return:
"""
self.key = "test_query_doc"
self.index_name = "fts_test_index"
self.sync_write_abort_pattern = self.input.param("sync_write_abort_pattern", "all_aborts")
self.create_index_during = self.input.param("create_index_during", "before_doc_ops")
self.restServer = self.cluster_util.get_nodes_from_services_map(service_type="fts")
self.rest = RestConnection(self.restServer)
crud_batch_size = 1000
def_bucket = self.bucket_util.buckets[0]
kv_nodes = self.cluster_util.get_kv_nodes()
replica_vbs = dict()
verification_dict = dict()
index_item_count = dict()
expected_num_indexed = dict()
load_gen = dict()
load_gen["ADD"] = dict()
load_gen["SET"] = dict()
partial_aborts = ["initial_aborts", "aborts_at_end"]
durability_helper = DurabilityHelper(
self.log, len(self.cluster.nodes_in_cluster),
durability=self.durability_level,
replicate_to=self.replicate_to,
persist_to=self.persist_to)
if self.create_index_during == "before_doc_ops":
self.create_fts_indexes(def_bucket.name, self.index_name)
curr_items = self.bucket_util.get_bucket_current_item_count(
self.cluster, def_bucket)
if self.sync_write_abort_pattern in ["all_aborts", "initial_aborts"]:
self.bucket_util.flush_bucket(kv_nodes[0], def_bucket)
self.num_items = 0
else:
self.num_items = curr_items
self.log.info("Disabling auto_failover to avoid node failures")
status = RestConnection(self.cluster.master) \
.update_autofailover_settings(False, 120, False)
self.assertTrue(status, msg="Failure during disabling auto-failover")
# Validate vbucket stats
verification_dict["ops_create"] = self.num_items
verification_dict["ops_update"] = 0
# verification_dict["ops_delete"] = 0
verification_dict["rollback_item_count"] = 0
verification_dict["sync_write_aborted_count"] = 0
verification_dict["sync_write_committed_count"] = 0
if self.create_index_during == "before_doc_ops":
self.validate_indexed_doc_count(self.index_name , verification_dict["ops_create"])
self.log.info("Loading docs such that all sync_writes will be aborted")
for server in kv_nodes:
ssh_shell = RemoteMachineShellConnection(server)
cbstats = Cbstats(ssh_shell)
replica_vbs[server] = cbstats.vbucket_list(def_bucket.name,
"replica")
load_gen["ADD"][server] = list()
load_gen["ADD"][server].append(doc_generator(
self.key, 0, crud_batch_size,
target_vbucket=replica_vbs[server],
mutation_type="ADD"))
if self.sync_write_abort_pattern in partial_aborts:
load_gen["ADD"][server].append(doc_generator(
self.key, 10000, crud_batch_size,
target_vbucket=replica_vbs[server],
mutation_type="ADD"))
verification_dict["ops_create"] += crud_batch_size
verification_dict["sync_write_committed_count"] += \
crud_batch_size
task_success = self.bucket_util.load_durable_aborts(
ssh_shell, load_gen["ADD"][server], def_bucket,
self.durability_level,
"create", self.sync_write_abort_pattern)
if not task_success:
self.log_failure("Failure during load_abort task")
verification_dict["sync_write_aborted_count"] += \
crud_batch_size
if self.create_index_during == "before_doc_ops":
self.validate_indexed_doc_count(self.index_name , verification_dict["ops_create"])
load_gen["SET"][server] = list()
load_gen["SET"][server].append(doc_generator(
self.key, 0, crud_batch_size,
target_vbucket=replica_vbs[server],
mutation_type="SET"))
if self.sync_write_abort_pattern in partial_aborts:
load_gen["SET"][server].append(doc_generator(
self.key, 10000, crud_batch_size,
target_vbucket=replica_vbs[server],
mutation_type="SET"))
verification_dict["ops_update"] += crud_batch_size
verification_dict["sync_write_committed_count"] += \
crud_batch_size
verification_dict["sync_write_aborted_count"] += \
crud_batch_size
task_success = self.bucket_util.load_durable_aborts(
ssh_shell, load_gen["SET"][server], def_bucket,
self.durability_level,
"update", self.sync_write_abort_pattern)
if not task_success:
self.log_failure("Failure during load_abort task")
ssh_shell.disconnect()
if self.create_index_during == "before_doc_ops":
self.validate_indexed_doc_count(self.index_name , verification_dict["ops_create"])
failed = durability_helper.verify_vbucket_details_stats(
def_bucket, kv_nodes,
vbuckets=self.cluster_util.vbuckets, expected_val=verification_dict)
# if failed:
# self.sleep(6000)
# self.log_failure("Cbstat vbucket-details verification failed")
self.validate_test_failure()
if self.create_index_during == "after_doc_ops":
self.create_fts_indexes(def_bucket.name, self.index_name)
self.validate_indexed_doc_count(self.index_name , verification_dict["ops_create"])
self.log.info("Verify aborts are not indexed")
self.validate_indexed_doc_count(self.index_name , verification_dict["ops_create"])
for server in kv_nodes:
if self.sync_write_abort_pattern == "initial_aborts":
load_gen["ADD"][server] = load_gen["ADD"][server][:1]
load_gen["SET"][server] = load_gen["SET"][server][:1]
elif self.sync_write_abort_pattern == "aborts_at_end":
load_gen["ADD"][server] = load_gen["ADD"][server][-1:]
load_gen["SET"][server] = load_gen["SET"][server][-1:]
self.log.info("Load sync_write docs such that they are successful")
for server in kv_nodes:
for gen_load in load_gen["ADD"][server]:
task = self.task.async_load_gen_docs(
self.cluster, def_bucket, gen_load, "create", 0,
batch_size=50, process_concurrency=8,
replicate_to=self.replicate_to, persist_to=self.persist_to,
durability=self.durability_level,
timeout_secs=self.sdk_timeout)
self.task.jython_task_manager.get_task_result(task)
if len(task.fail.keys()) != 0:
self.log_failure("Some failures seen during doc_ops")
verification_dict["ops_create"] += crud_batch_size
self.validate_indexed_doc_count(self.index_name, verification_dict["ops_create"])
for gen_load in load_gen["SET"][server]:
task = self.task.async_load_gen_docs(
self.cluster, def_bucket, gen_load, "update", 0,
batch_size=50, process_concurrency=8,
replicate_to=self.replicate_to, persist_to=self.persist_to,
durability=self.durability_level,
timeout_secs=self.sdk_timeout)
self.task.jython_task_manager.get_task_result(task)
if len(task.fail.keys()) != 0:
self.log_failure("Some failures seen during doc_ops")
verification_dict["ops_update"] += crud_batch_size
self.validate_indexed_doc_count(self.index_name , verification_dict["ops_create"])
self.log.info("Validate the mutated docs are taken into indexing")
self.validate_indexed_doc_count(self.index_name , verification_dict["ops_create"])
self.validate_test_failure()
def _create_fts_index_params(self, bucket_name, bucket_uuid, index_name):
body = {}
body['type'] = "fulltext-index"
body['name'] = index_name
body['sourceType'] = "couchbase"
body['sourceName'] = bucket_name
body['sourceUUID'] = bucket_uuid
body['planParams'] = {}
body['planParams']['maxPartitionsPerPIndex'] = 171
body['planParams']['indexPartitions'] = 6
body['params'] = {}
body['params']['doc_config'] = {}
body['params']['mapping'] = {}
body['params']['store'] = {}
body['params']['doc_config']['docid_prefix_delim'] = ""
body['params']['doc_config']['docid_regexp'] = ""
body['params']['doc_config']['mode'] = "type_field"
body['params']['doc_config']['type_field'] = "type"
body['params']['mapping']['analysis'] = {}
body['params']['mapping']['default_analyzer'] = "standard"
body['params']['mapping']['default_datetime_parser'] = "dateTimeOptional"
body['params']['mapping']['default_field'] = "_all"
body['params']['mapping']['default_mapping'] = {}
body['params']['mapping']['default_mapping']['dynamic'] = True
body['params']['mapping']['default_mapping']['enabled'] = True
body['params']['mapping']['default_type'] = "_default"
body['params']['mapping']['docvalues_dynamic'] = True
body['params']['mapping']['index_dynamic'] = True
body['params']['mapping']['store_dynamic'] = False
body['params']['mapping']['type_field'] = "_type"
body['params']['store']['indexType'] = "scorch"
body['sourceParams'] = {}
return body
def create_fts_indexes(self, bucket, index_name):
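"""Create an FTS index named `index_name` over `bucket` through the
REST API, using the bucket UUID from the bucket stats as source UUID.
"""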
bucket_stats = self.bucket_helper.get_bucket_json(bucket)
bucket_uuid = bucket_stats["uuid"]
params = self._create_fts_index_params(bucket, bucket_uuid, index_name)
self.rest.create_fts_index(index_name, params)
def validate_indexed_doc_count(self, index, expected_index_item_count):
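"""Raise if the FTS index doc count differs from the expected number
of indexed (committed) documents.
"""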
actual_item_count = self.rest.get_fts_index_doc_count(index)
print("actual_item_count : {0} expected_index_item_count : {1}".format(actual_item_count, expected_index_item_count))
if expected_index_item_count != actual_item_count:
raise Exception("FTS index doc count mismatch: expected {0}, got {1}".format(expected_index_item_count, actual_item_count))
# ---------------------------------------------------------------
# Source file: Task 2.9.2.1 Kung Fu : Pole arms - Ventral.py
# Repository: varipon/Work-Plan-2.-Biomechanical-simulation-of-human (MIT)
# ---------------------------------------------------------------
# ================
# SOFTWARE LICENSE
# ================
# The MIT License (MIT)
# Copyright (c) 2018 Yutaka Sawai (Varipon)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================
# LICENSE FOR CONTENT PROCEDURALLY GENERATED USING THIS SOFTWARE
# ==============================================================
# All content procedurally generated by this software and its permutations
# are licensed under Creative Commons Attribution By 3.0:
# https://creativecommons.org/licenses/by/3.0/
#!/usr/bin/python
import bpy
from bpy import *
import mathutils
import math
from mathutils import *
from math import *
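# NOTE: `interval`, `frame_start` and `frame_end` are module-level globals
# read by the constructors below; they are assumed to be assigned later in
# the full script, before any of these classes is instantiated.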
class Formula:
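"""Builds one scissor-linkage armature in Blender (2.7x API).

Joint positions are computed first (configMovement); edit bones, IK
constraints, the keyframed driver rotation and the joint meshes are
then created from them. Greek joint names map to arrays as noted in
the comments below: α -> a, β -> b, γ -> y, δ -> o.
"""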
J = 18 #joint number
def __init__(self, P, A, move, part, helicity, start, end):
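"""P is the pivot factor and A the scale factor; move/part/helicity
feed the object and bone names (helicity also selects the bone-roll
direction); start/end are the driver angles in degrees.
"""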
global interval
global frame_start
global frame_end
self.interval = interval
self.frame_start = frame_start
self.frame_end = frame_end
# pivot factor
self.P = P
# scale factor
self.A = A
# name
self.move = move
# element
self.part = part
# element helicity
self.helicity = helicity
self.start = start
self.end = end
bpy.ops.object.mode_set(mode='OBJECT')
# Create armature and object
self.amt = bpy.data.armatures.new(move + '.' + part + '.' + helicity + '.data')
self.rig = bpy.data.objects.new(move + '.' + part + '.' + helicity, self.amt)
# Joints α(n) -> a[n], β(n) -> b[n], γ(n) -> y[n], δ(n) -> o[n]
self.a = [0 for i in range(4)] # Joint α
self.b = [0 for i in range(self.J)] # Joint β
self.y = [0 for i in range(self.J)] # Joint γ
self.o = [0 for i in range(self.J)] # Joint δ
# Configuration Movement
self.configMovement(self.P, self.A, self.J, self.a, self.b, self.y, self.o)
# Construction Movement
self.constructMovement(self.J, self.helicity, self.amt, self.rig, self.a, self.b, self.y, self.o)
# Construction Rotation
self.configRotation(self.rig, self.interval, self.frame_start, self.frame_end, self.start, self.end)
# Configuration Linkage
self.configLink(self.A, self.J, self.helicity, self.rig, self.move, self.part)
# Construction Linkage
self.constructLink(self.A, self.J, self.helicity, self.rig, self.move, self.part)
def configMovement(self, P, A, J, a, b, y, o):
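"""Compute the joint coordinates. The first cells are placed
explicitly ("pattern A"); each later cell is the previous one rotated
-45 degrees about Z around the pivot a[0], via the matrix product in
the loop below.
"""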
mat_a = [0 for i in range(4)] # Joint α matrix
mat_b = [0 for i in range(self.J)] # Joint β matrix
mat_y = [0 for i in range(self.J)] # Joint γ matrix
mat_o = [0 for i in range(self.J)] # Joint δ matrix
a[1] = mathutils.Euler((P, A, 0), 'XYZ')
print ("a1 =", a[1])
a[2] = mathutils.Euler((A, -A, 0), 'XYZ')
print ("a2 =", a[2])
b[1] = mathutils.Euler((-A, A, 0), 'XYZ')
print ("b1 =", b[1])
o[1] = mathutils.Euler((A, A, 0), 'XYZ')
print ("o1 =", o[1])
B = A * 2 * sqrt (2)
C = B + (B * sqrt (2))
D = C * sqrt (2)
E = C + D
a[0] = mathutils.Euler((-A - E + (D * 0.5), -A - (D * 0.5), 0), 'XYZ')
print ("a0 =", a[0])
mat_a[0] = Matrix.Translation(a[0])
a[3] = mathutils.Euler((0-a[0].x, 0-a[0].y, 0-a[0].z), 'XYZ')
print ("a3 =", a[3])
mat_a[3] = Matrix.Translation(a[3])
y[1] = mathutils.Euler((-A, -A, 0), 'XYZ')
print ("y1 =", y[1])
mat_y[1] = Matrix.Translation(y[1])
### pattern A
b[2] = mathutils.Euler((a[0].x + E + (A * 2), a[0].y + (A * 2), 0), 'XYZ')
print ("b2 =", b[2])
mat_b[2] = Matrix.Translation(b[2])
b[3] = mathutils.Euler((a[0].x + E - (D * 0.5), a[0].y - (A * 2), 0), 'XYZ')
print ("b3 =", b[3])
mat_b[3] = Matrix.Translation(b[3])
y[2] = mathutils.Euler((a[0].x + E, a[0].y, 0), 'XYZ')
print ("y2 =", y[2])
mat_y[2] = Matrix.Translation(y[2])
y[3] = mathutils.Euler((a[0].x + E - (D * 0.5), a[0].y - (D * 0.5), 0), 'XYZ')
print ("y3 =", y[3])
mat_y[3] = Matrix.Translation(y[3])
o[2] = mathutils.Euler((a[0].x + E + (A * 2), a[0].y - (A * 2), 0), 'XYZ')
print ("o2 =", o[2])
mat_o[2] = Matrix.Translation(o[2])
o[3] = mathutils.Euler((a[0].x + E - (D * 0.5) - (A * 2), a[0].y - (D * 0.5) - (A * 2), 0), 'XYZ')
print ("o3 =", o[3])
mat_o[3] = Matrix.Translation(o[3])
### pattern A end
org_rot_mat = Matrix.Rotation(math.radians(0), 4, 'Z')
# define the rotation
rot_mat = Matrix.Rotation(math.radians(-45), 4, 'Z')
for j in range(2, J - 2):
mat_y[j + 2] = mat_a[0] * org_rot_mat * rot_mat * mat_a[3] * mat_y[j]
# obj.matrix_world = mat_y[j + 2]
# extract components back out of the matrix
loc, rot, sca = mat_y[j + 2].decompose()
y[j + 2] = mathutils.Euler(loc, 'XYZ')
print("y"+str(j + 2)+" = ", y[j + 2], rot, sca)
mat_b[j + 2] = mat_a[0] * org_rot_mat * rot_mat * mat_a[3] * mat_b[j]
# obj.matrix_world = mat_b[j + 2]
# extract components back out of the matrix
loc, rot, sca = mat_b[j + 2].decompose()
b[j + 2] = mathutils.Euler(loc, 'XYZ')
print("b"+str(j + 2)+" = ", b[j + 2], rot, sca)
mat_o[j + 2] = mat_a[0] * org_rot_mat * rot_mat * mat_a[3] * mat_o[j]
# obj.matrix_world = mat_o[j + 2]
# extract components back out of the matrix
loc, rot, sca = mat_o[j + 2].decompose()
o[j + 2] = mathutils.Euler(loc, 'XYZ')
print("o"+str(j + 2)+" = ", o[j + 2], rot, sca)
def constructMovement(self, J, helicity, amt, rig, a, b, y, o):
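"""Create edit bones between consecutive joints, parent them into a
chain, set the bone roll from `helicity`, then add one IK constraint
per cell so the linkage folds consistently.
"""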
# Linkages
aa = [[0 for i in range(4)] for j in range(4)] # Link α(i) - α(j)
ab = [[0 for i in range(4)] for j in range(4)] # Link α(i) - β(j)
ya = [[0 for i in range(4)] for j in range(4)] # Link γ(i) - α(j)
ao = [[0 for i in range(4)] for j in range(4)] # Link α(i) - δ(j)
ob = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - β(j)
yy = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - γ(j)
by = [[0 for i in range(self.J)] for j in range(self.J)] # Link β(i) - γ(j)
yo = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - δ(j)
rig.location = mathutils.Euler((0.0, 0.0, 0.0), 'XYZ')
rig.show_x_ray = True
amt.show_names = True
amt.draw_type = 'STICK'
# amt.draw_type = 'BBONE'
# Link object to scene
scn = bpy.context.scene
scn.objects.link(rig)
scn.objects.active = rig
scn.update()
# Edit
bpy.ops.object.editmode_toggle()
# Construction Linkage
aa[2][1] = amt.edit_bones.new('a2a1')
aa[2][1].head = a[2]
aa[2][1].tail = a[1]
ab[1][1] = amt.edit_bones.new('a1b1')
ab[1][1].head = a[1]
ab[1][1].tail = b[1]
ab[1][1].parent = aa[2][1]
by[1][1] = amt.edit_bones.new('b1y1')
by[1][1].head = b[1]
by[1][1].tail = y[1]
by[1][1].parent = ab[1][1]
by[1][1].use_inherit_rotation = False
ya[1][2] = amt.edit_bones.new('y1a2')
ya[1][2].head = y[1]
ya[1][2].tail = a[2]
ya[1][2].parent = by[1][1]
ao[2][1] = amt.edit_bones.new('a2o1')
ao[2][1].head = a[2]
ao[2][1].tail = o[1]
ao[2][1].parent = ya[1][2]
ob[1][2] = amt.edit_bones.new('o1b2')
ob[1][2].head = o[1]
ob[1][2].tail = b[2]
ob[1][2].parent = ao[2][1]
yy[1][2] = amt.edit_bones.new('y1y2')
yy[1][2].head = y[1]
yy[1][2].tail = y[2]
yy[1][2].parent = by[1][1]
for j in range(2, J - 1):
by[j][j] = amt.edit_bones.new('b'+ str(j) + 'y'+ str(j))
by[j][j].head = b[j]
by[j][j].tail = y[j]
by[j][j].parent = ob[j-1][j]
yo[j][j] = amt.edit_bones.new('y'+ str(j) + 'o'+ str(j))
yo[j][j].head = y[j]
yo[j][j].tail = o[j]
yo[j][j].parent = yy[j-1][j]
yy[j][j+1] = amt.edit_bones.new('y'+ str(j) + 'y'+ str(j+1))
yy[j][j+1].head = y[j]
yy[j][j+1].tail = y[j+1]
yy[j][j+1].parent = by[j][j]
if j < (J-2):
ob[j][j+1] = amt.edit_bones.new('o'+ str(j) + 'b'+ str(j+1))
ob[j][j+1].head = o[j]
ob[j][j+1].tail = b[j+1]
ob[j][j+1].parent = yo[j][j]
# select all bones
#bpy.ops.pose.select_all(action="SELECT")
for bone in amt.edit_bones:
bone.select = True
if helicity == 'right':
bpy.ops.armature.calculate_roll(type='GLOBAL_POS_Z')
else:
bpy.ops.armature.calculate_roll(type='GLOBAL_NEG_Z')
# Bone constraints. Armature must be in pose mode.
bpy.ops.object.mode_set(mode='POSE')
# IK constraint
cns = rig.pose.bones['y1a2'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'a2a1'
cns.chain_count = 2
cns.use_stretch = False
for j in range(2, J - 1):
cns = rig.pose.bones['b'+str(j) +'y'+str(j)].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'y'+str(j)+'o'+str(j)
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
bpy.ops.object.mode_set(mode='OBJECT')
def configRotation(self, rig, interval, frame_start, frame_end, start, end):
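"""Keyframe the driver bone 'a1b1' from `start` to `end` degrees about
local Z, with linear interpolation and a REPEAT_OFFSET cycles modifier
so the motion loops.
"""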
# Bone constraints. Armature must be in pose mode.
bpy.ops.object.mode_set(mode='POSE')
# key insert
keyframe_insert_interval = interval
rig.pose.bones["a1b1"].rotation_mode = 'XYZ'
rig.pose.bones["a1b1"].rotation_euler.z = math.radians(start)
rig.pose.bones["a1b1"].keyframe_insert(data_path="rotation_euler",frame=frame_start)
rig.pose.bones["a1b1"].rotation_mode = 'XYZ'
rig.pose.bones["a1b1"].rotation_euler.z = math.radians(end)
rig.pose.bones["a1b1"].keyframe_insert(data_path="rotation_euler",frame=frame_end)
for curve in bpy.context.active_object.animation_data.action.fcurves:
cycles = curve.modifiers.new(type='CYCLES')
cycles.mode_before = 'REPEAT_OFFSET'
cycles.mode_after = 'REPEAT_OFFSET'
for keyframe in curve.keyframe_points:
keyframe.interpolation = 'LINEAR'
bpy.ops.object.mode_set(mode='OBJECT')
def configLink(self, A, J, helicity, rig, move, part):
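"""Duplicate the template joint meshes (joint.gold.000,
joint.silver.001, joint.copper.001, joint.blue.001, joint.green.001 -
assumed to already exist in the .blend file), one copy per bone, named
'<bone>.mesh.<move>.<part>.<helicity>', then make them single-user
with their origin at the 3D cursor.
"""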
bpy.ops.object.mode_set(mode='OBJECT')
Q = (0.18648+0.146446)*A
# Z = -Q*2
Z = 0.0
obj_joint = bpy.data.objects["joint.gold.000"].copy()
obj_joint.location = (0.0, 0.0, -Q*3+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "a2a1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.silver.001"].copy()
obj_joint.location = (0.0, 0.0, +Q+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y1a2.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.copper.001"].copy()
obj_joint.location = (0.0, 0.0, +Q*3+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "a2o1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.001"].copy()
obj_joint.location = (0.0, 0.0, -Q*2+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "a1b1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
for n in range(1, J - 1):
if n <= (J-2):
# Pattern 2 of by
obj_joint = bpy.data.objects["joint.green.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*((n+1) % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "b"+str(n)+"y"+str(n)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
# Pattern 2 of yy
obj_joint = bpy.data.objects["joint.gold.00"+str(1 + (n+1) % 2)].copy()
obj_joint.location = (0.0, 0.0, +Q*(1 - (n % 2))*2+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"y"+str(n+1)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
if n <= (J-3):
# Pattern 1 of ob
obj_joint = bpy.data.objects["joint.blue.001"].copy()
obj_joint.location = (0.0, 0.0, -Q*2 + Q*(n % 2)*6 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "o"+str(n)+"b"+str(n+1)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
# Pattern 2 of yo
obj_joint = bpy.data.objects["joint.copper.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*((n+1) % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n+1)+"o"+str(n+1)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
for ob in bpy.context.scene.objects:
if "mesh" in ob.name:
ob.select = True
bpy.ops.object.make_single_user(type='SELECTED_OBJECTS', object=True, obdata=True, material=True, texture=True, animation=True)
bpy.context.scene.cursor_location = (0.0, 0.0, 0.0)
bpy.ops.object.origin_set(type='ORIGIN_CURSOR')
def constructLink(self, A, J, helicity, rig, move, part):
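"""For every pose bone except the terminal o-b link, build a small
helper armature (bone plus head/tail markers), bind the matching joint
mesh to it with an Armature modifier, and constrain it to copy the
source bone's location/rotation and stretch to its tail.
"""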
# Move and rotate the tip bone in pose mode
bpy.context.scene.objects.active = rig
Y = 1.1838*A
for n in rig.pose.bones:
if n.name != "o" + str(J-2) + "b" + str(J-1):
# we can get the object from the pose bone
obj = n.id_data
matrix_final = obj.matrix_world * n.matrix
# Create armature and object
lnk = bpy.data.armatures.new(n.name + '.data.' + helicity)
lnk_rig = bpy.data.objects.new(n.name + '.link.' + helicity, lnk)
lnk_rig.location = mathutils.Euler((0.0, 0.0, 0.0), 'XYZ')
# rig.show_x_ray = True
lnk.show_names = True
lnk.draw_type = 'STICK'
# Link object to scene
scn = bpy.context.scene
scn.objects.link(lnk_rig)
scn.objects.active = lnk_rig
scn.update()
# Create bones
# mode='EDIT'
bpy.ops.object.editmode_toggle()
link = lnk.edit_bones.new(n.name)
link.head = (0, 0, 0)
link.tail = (0, Y, 0)
link_head = lnk.edit_bones.new('head')
link_head.head = (0, 0, 0.1)
link_head.tail = (0, 0, 0)
link_head.parent = link
link_head.use_inherit_scale = False
link_tail = lnk.edit_bones.new('tail')
link_tail.head = (0, Y, 0)
link_tail.tail = (0, Y, -0.1)
link_tail.parent = link
link_tail.use_inherit_scale = False
bpy.ops.object.mode_set(mode='OBJECT')
ob = bpy.data.objects[n.name + '.mesh.' + move + '.' + part + '.' + helicity]
ob.location = mathutils.Euler((0, 0, 0), 'XYZ')
# Give mesh object an armature modifier, using vertex groups but
# not envelopes
mod = ob.modifiers.new('MyRigModif', 'ARMATURE')
mod.object = lnk_rig
mod.use_bone_envelopes = False
mod.use_vertex_groups = True
# Bone constraints. Armature must be in pose mode.
bpy.ops.object.mode_set(mode='POSE')
# Copy location constraint: follow the source bone's head
pBase = lnk_rig.pose.bones[n.name]
cns = pBase.constraints.new('COPY_LOCATION')
cns.name = 'Copy_Location'
cns.target = rig
cns.subtarget = n.name
cns.owner_space = 'WORLD'
cns.target_space = 'WORLD'
# Copy rotation constraint: follow the source bone's orientation
pBase = lnk_rig.pose.bones[n.name]
cns = pBase.constraints.new('COPY_ROTATION')
cns.name = 'Copy_Rotation'
cns.target = rig
cns.subtarget = n.name
cns.owner_space = 'WORLD'
cns.target_space = 'WORLD'
# StretchTo constraint to the source bone's tail (head_tail = 1)
cns1 = pBase.constraints.new('STRETCH_TO')
cns1.name = 'Stretch'
cns1.target = rig
cns1.subtarget = n.name
cns1.head_tail = 1
cns1.rest_length = Y
cns1.influence = 1
cns1.keep_axis = 'PLANE_Z'
cns1.volume = 'NO_VOLUME'
bpy.ops.object.mode_set(mode='OBJECT')
class Pitch(Formula):
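"""Single-cell (J = 2) variant of Formula that also bone-parents a
Body rig to its driver, acting as the global pitch control.
"""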
J = 2 #joint number
# Overriding
def __init__(self, P, A, move, part, helicity, start, end,
body_loc, body_rot, body):
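"""Formula.__init__ plus a `body` rig that is bone-parented to this
pitch rig at `body_loc`/`body_rot` (see setParent).
"""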
global interval
global frame_start
global frame_end
self.interval = interval
self.frame_start = frame_start
self.frame_end = frame_end
# pivot factor
self.P = P
# scale factor
self.A = A
# name
self.move = move
# element
self.part = part
# element helicity
self.helicity = helicity
self.start = start
self.end = end
# body
self.body_loc = body_loc
self.body_rot = body_rot
self.body = body
bpy.ops.object.mode_set(mode='OBJECT')
# Create armature and object
self.amt = bpy.data.armatures.new(move + '.' + part + '.' + helicity + '.data')
self.rig = bpy.data.objects.new(move + '.' + part + '.' + helicity, self.amt)
# Joints α(n) -> a[n], β(n) -> b[n], γ(n) -> y[n]
self.a = [0 for i in range(4)] # Joint α
self.b = [0 for i in range(self.J)] # Joint β
self.y = [0 for i in range(self.J)] # Joint γ
# Configuration Movement
self.configMovement(self.P, self.A, self.J, self.a, self.b, self.y)
# Construction Movement
self.constructMovement(self.J, self.helicity, self.amt, self.rig, self.a, self.b, self.y)
# Parent body to pitch
self.setParent(self.helicity, self.move, self.rig, self.body_loc, self.body_rot, self.body)
# Construction Rotation
self.configRotation(self.rig, self.interval, self.frame_start, self.frame_end, self.start, self.end)
# Configuration Linkage
self.configLink(self.A*0.3, self.J, self.helicity, self.rig, self.move, self.part)
# Construction Linkage
self.constructLink(self.A*0.3, self.J, self.helicity, self.rig, self.move, self.part)
def configMovement(self, P, A, J, a, b, y):
mat_a = [0 for i in range(4)] # Joint α matrix
mat_b = [0 for i in range(self.J)] # Joint β matrix
mat_y = [0 for i in range(self.J)] # Joint γ matrix
a[1] = mathutils.Euler((P, A, 0), 'XYZ')
print ("a1 =", a[1])
a[2] = mathutils.Euler((A, -A, 0), 'XYZ')
print ("a2 =", a[2])
b[1] = mathutils.Euler((-A, A, 0), 'XYZ')
print ("b1 =", b[1])
y[1] = mathutils.Euler((-A, -A, 0), 'XYZ')
print ("y1 =", y[1])
mat_y[1] = Matrix.Translation(y[1])
def constructMovement(self, J, helicity, amt, rig, a, b, y):
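"""Reduced copy of Formula.constructMovement: a single cell
(a2a1, a1b1, b1y1, y1a2) with one IK constraint and no δ joints.
"""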
# Linkages
aa = [[0 for i in range(4)] for j in range(4)] # Link α(i) - α(j)
ab = [[0 for i in range(4)] for j in range(4)] # Link α(i) - β(j)
ya = [[0 for i in range(4)] for j in range(4)] # Link γ(i) - α(j)
by = [[0 for i in range(self.J)] for j in range(self.J)] # Link β(i) - γ(j)
rig.location = mathutils.Euler((0.0, 0.0, 0.0), 'XYZ')
rig.show_x_ray = True
amt.show_names = True
amt.draw_type = 'STICK'
# amt.draw_type = 'BBONE'
# Link object to scene
scn = bpy.context.scene
scn.objects.link(rig)
scn.objects.active = rig
scn.update()
# Edit
bpy.ops.object.editmode_toggle()
# Construction Linkage
aa[2][1] = amt.edit_bones.new('a2a1')
aa[2][1].head = a[2]
aa[2][1].tail = a[1]
ab[1][1] = amt.edit_bones.new('a1b1')
ab[1][1].head = a[1]
ab[1][1].tail = b[1]
ab[1][1].parent = aa[2][1]
by[1][1] = amt.edit_bones.new('b1y1')
by[1][1].head = b[1]
by[1][1].tail = y[1]
by[1][1].parent = ab[1][1]
by[1][1].use_inherit_rotation = False
ya[1][2] = amt.edit_bones.new('y1a2')
ya[1][2].head = y[1]
ya[1][2].tail = a[2]
ya[1][2].parent = by[1][1]
# select all bones
#bpy.ops.pose.select_all(action="SELECT")
for bone in amt.edit_bones:
bone.select = True
if helicity == 'right':
bpy.ops.armature.calculate_roll(type='GLOBAL_POS_Z')
else:
bpy.ops.armature.calculate_roll(type='GLOBAL_NEG_Z')
# Bone constraints. Armature must be in pose mode.
bpy.ops.object.mode_set(mode='POSE')
# IK constraint
cns = rig.pose.bones['y1a2'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'a2a1'
cns.chain_count = 2
cns.use_stretch = False
bpy.ops.object.mode_set(mode='OBJECT')
def configLink(self, A, J, helicity, rig, move, part):
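"""Reduced copy of Formula.configLink: joint meshes for the four bones
of the single cell (template 'joint.silver.A' assumed to exist).
"""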
bpy.ops.object.mode_set(mode='OBJECT')
Q = (0.18648+0.146446)*A
# Z = -Q*2
Z = 0.0
obj_joint = bpy.data.objects["joint.gold.000"].copy()
obj_joint.location = (0.0, 0.0, -Q*3+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "a2a1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.silver.A"].copy()
obj_joint.location = (0.0, 0.0, +Q+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y1a2.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.001"].copy()
obj_joint.location = (0.0, 0.0, -Q*2+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "a1b1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
n = 1
obj_joint = bpy.data.objects["joint.green.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*((n+1) % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "b"+str(n)+"y"+str(n)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
for ob in bpy.context.scene.objects:
if "mesh" in ob.name:
ob.select = True
bpy.ops.object.make_single_user(type='SELECTED_OBJECTS', object=True, obdata=True, material=True, texture=True, animation=True)
bpy.context.scene.cursor_location = (0.0, 0.0, 0.0)
bpy.ops.object.origin_set(type='ORIGIN_CURSOR')
# Parent set fingers to arm
def setParent(self, helicity, move, rig,
body_loc, body_rot, body):
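"""Bone-parent `body.rig` to this rig's 'b1y1' bone at frame 0,
keeping the given world location/rotation.
"""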
# body position
body.rig.location = body_loc
body.rig.rotation_euler = body_rot
# body to pitch
bpy.ops.object.mode_set(mode='OBJECT')
bpy.context.scene.frame_current = 0
bpy.ops.object.select_all(action='DESELECT')
rig.select = True
bpy.context.scene.objects.active = rig
bpy.ops.object.editmode_toggle()
parent_bone = 'b1y1' # choose the bone name which you want to be the parent
rig.data.edit_bones.active = rig.data.edit_bones[parent_bone]
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='DESELECT') #deselect all objects
body.rig.select = True
rig.select = True
bpy.context.scene.objects.active = rig # the active object becomes the parent of all selected objects
bpy.ops.object.parent_set(type='BONE', keep_transform=True)
bpy.ops.object.select_all(action='DESELECT') #deselect all objects
# end
class Body(Formula):
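"""Torso rig (J = 7): a central cell branching into upper body,
shoulders, lower body and both legs, plus auxiliary 'gimbal' chains
used as IK/pole targets for the legs; arm rigs are parented on later.
"""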
J = 7 #joint number
# Overriding
def __init__(self, P, A, move, part, helicity, start, end,
arm_left_loc, arm_left_rot, arm_left,
arm_right_loc, arm_right_rot, arm_right):
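"""Build the torso joint tables for every branch, construct the bones,
bone-parent the two arm rigs, keyframe the driver and attach meshes
(scaled by 0.7).
"""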
global interval
global frame_start
global frame_end
self.interval = interval
self.frame_start = frame_start
self.frame_end = frame_end
# pivot factor
self.P = P
# scale factor
self.A = A
# name
self.move = move
# element
self.part = part
# element helicity
self.helicity = helicity
self.start = start
self.end = end
bpy.ops.object.mode_set(mode='OBJECT')
# Create armature and object
self.amt = bpy.data.armatures.new(move + '.' + part + '.' + helicity + '.data')
self.rig = bpy.data.objects.new(move + '.' + part + '.' + helicity, self.amt)
self.arm_left_loc = arm_left_loc
self.arm_left_rot = arm_left_rot
self.arm_left = arm_left
self.arm_right_loc = arm_right_loc
self.arm_right_rot = arm_right_rot
self.arm_right = arm_right
# Centroid
# Joints α(n) -> a[n], β(n) -> b[n], γ(n) -> y[n], δ(n) -> o[n]
self.a = [0 for i in range(3)] # Joint α
self.b = [0 for i in range(2)] # Joint β
self.y = [0 for i in range(2)] # Joint γ
self.o = [0 for i in range(2)] # Joint δ
# Upper body
# Joints α(n) -> a[n], β(n) -> b[n], γ(n) -> y[n], δ(n) -> o[n]
self.upper_b = [0 for i in range(self.J)] # Joint β
self.upper_y = [0 for i in range(self.J)] # Joint γ
self.upper_o = [0 for i in range(self.J)] # Joint δ
# Joints ω(n) -> w[n]
self.upper_w = [0 for i in range(self.J)] # Joint ω
# Left shoulder
# Joints α(n) -> a[n], β(n) -> b[n], γ(n) -> y[n], δ(n) -> o[n]
self.upper_left_b = [0 for i in range(self.J)] # Joint β
self.upper_left_y = [0 for i in range(self.J)] # Joint γ
self.upper_left_o = [0 for i in range(self.J)] # Joint δ
self.upper_left_w = [0 for i in range(self.J)] # Joint ω
# Right shoulder
# Joints α(n) -> a[n], β(n) -> b[n], γ(n) -> y[n], δ(n) -> o[n]
self.upper_right_b = [0 for i in range(self.J)] # Joint β
self.upper_right_y = [0 for i in range(self.J)] # Joint γ
self.upper_right_o = [0 for i in range(self.J)] # Joint δ
self.upper_right_w = [0 for i in range(self.J)] # Joint ω
# Lower body
# Joints α(n) -> a[n], β(n) -> b[n], γ(n) -> y[n], δ(n) -> o[n]
self.lower_b = [0 for i in range(self.J)] # Joint β
self.lower_y = [0 for i in range(self.J)] # Joint γ
self.lower_o = [0 for i in range(self.J)] # Joint δ
# Joints ω(n) -> w[n]
self.lower_w = [0 for i in range(self.J)] # Joint ω
# Left leg
# Joints α(n) -> a[n], β(n) -> b[n], γ(n) -> y[n], δ(n) -> o[n]
self.lower_left_b = [0 for i in range(self.J)] # Joint β
self.lower_left_y = [0 for i in range(self.J)] # Joint γ
self.lower_left_o = [0 for i in range(self.J)] # Joint δ
# Right leg
# Joints α(n) -> a[n], β(n) -> b[n], γ(n) -> y[n], δ(n) -> o[n]
self.lower_right_b = [0 for i in range(self.J)] # Joint β
self.lower_right_y = [0 for i in range(self.J)] # Joint γ
self.lower_right_o = [0 for i in range(self.J)] # Joint δ
# gimbal
self.gimbal_lower_left_o = [0 for i in range(self.J)] # Joint δ
self.gimbal_lower_left_b = [0 for i in range(self.J)] # Joint β
self.gimbal_lower_left_y = [0 for i in range(self.J)] # Joint γ
self.gimbal_lower_right_o = [0 for i in range(self.J)] # Joint δ
self.gimbal_lower_right_b = [0 for i in range(self.J)] # Joint β
self.gimbal_lower_right_y = [0 for i in range(self.J)] # Joint γ
# Configuration Movement
self.configMovement(self.P, self.A, self.J, self.a, self.b, self.y, self.o,
self.upper_b, self.upper_y, self.upper_o, self.upper_w,
self.upper_left_b, self.upper_left_y, self.upper_left_o,
self.upper_right_b, self.upper_right_y, self.upper_right_o,
self.lower_b, self.lower_y, self.lower_o, self.lower_w,
self.lower_left_b, self.lower_left_y, self.lower_left_o,
self.lower_right_b, self.lower_right_y, self.lower_right_o,
self.gimbal_lower_left_o, self.gimbal_lower_left_b, self.gimbal_lower_left_y,
self.gimbal_lower_right_o, self.gimbal_lower_right_b, self.gimbal_lower_right_y)
# Construction Movement
self.constructMovement(self.J, self.amt, self.rig,
self.a, self.b, self.y, self.o,
self.upper_b, self.upper_y, self.upper_o, self.upper_w,
self.upper_left_b, self.upper_left_y, self.upper_left_o,
self.upper_right_b, self.upper_right_y, self.upper_right_o,
self.lower_b, self.lower_y, self.lower_o, self.lower_w,
self.lower_left_b, self.lower_left_y, self.lower_left_o,
self.lower_right_b, self.lower_right_y, self.lower_right_o,
self.gimbal_lower_left_o, self.gimbal_lower_left_b, self.gimbal_lower_left_y,
self.gimbal_lower_right_o, self.gimbal_lower_right_b, self.gimbal_lower_right_y)
# Parent set arms to body
self.setParent(self.helicity, self.move, self.rig,
self.arm_left_loc, self.arm_left_rot, self.arm_left,
self.arm_right_loc, self.arm_right_rot, self.arm_right)
# Construction Rotation
self.configRotation(self.rig, self.interval, self.frame_start, self.frame_end, self.start, self.end)
self.configLink(self.A*0.7, self.J, self.helicity, self.rig, self.move, self.part)
# Construction Linkage
self.constructLink(self.A*0.7, self.J, self.helicity, self.rig, self.move, self.part)
# Overriding Configuration Movement
def configMovement(self, P, A, J, a, b, y, o,
upper_b, upper_y, upper_o, upper_w,
upper_left_b, upper_left_y, upper_left_o,
upper_right_b, upper_right_y, upper_right_o,
lower_b, lower_y, lower_o, lower_w,
lower_left_b, lower_left_y, lower_left_o,
lower_right_b, lower_right_y, lower_right_o,
gimbal_lower_left_o, gimbal_lower_left_b, gimbal_lower_left_y,
gimbal_lower_right_o, gimbal_lower_right_b, gimbal_lower_right_y):
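"""All torso joint coordinates are hard-coded here rather than derived
as in Formula.configMovement; the commented-out ω entries belong to
chains that are currently disabled.
"""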
a[1] = mathutils.Euler((P, A, 0), 'XYZ')
print ("a1 =", a[1])
a[2] = mathutils.Euler((A, -A, 0), 'XYZ')
print ("a2 =", a[2])
b[1] = mathutils.Euler((-A, A, 0), 'XYZ')
print ("b1 =", b[1])
o[1] = mathutils.Euler((A, A, 0), 'XYZ')
print ("o1 =", o[1])
y[1] = mathutils.Euler((-A, -A, 0), 'XYZ')
print ("y1 =", y[1])
lower_b[2] = mathutils.Euler((1.35031, -1.93408, 0), 'XYZ')
print ("b2.lower =", lower_b[2])
lower_o[2] = mathutils.Euler((-1.18173, -3.18999, 0), 'XYZ')
print ("o2.lower =", lower_o[2])
lower_y[2] = mathutils.Euler((-0.761987, -3.11885, 0), 'XYZ')
print ("y2.lower =", lower_y[2])
lower_y[3] = mathutils.Euler((-0.425565, -8.51839, 0), 'XYZ')
print ("y3.lower =", lower_y[3])
lower_w[1] = mathutils.Euler((-0.425565, -8.51839, 2.50277), 'XYZ')
print ("w1.lower =", lower_w[1])
lower_left_o[3] = mathutils.Euler((1.76787, -8.43042, 1.81914), 'XYZ')
print ("o3.lower.left =", lower_left_o[3])
lower_left_b[4] = mathutils.Euler((1.76787, -19.2299, 5.0545), 'XYZ')
print ("b4.lower.left =", lower_left_b[4])
lower_left_y[4] = mathutils.Euler((1.76787, -27.4568, 2.13126), 'XYZ')
print ("y4.lower.left =", lower_left_y[4])
lower_left_y[5] = mathutils.Euler((1.76787, -29.0398, 4.39707), 'XYZ')
print ("y5.lower.left =", lower_left_y[5])
gimbal_lower_left_o[3] = mathutils.Euler((1.87361, -8.51839, 2.50277), 'XYZ')
print ("o3.gimbal.lower.left =", gimbal_lower_left_o[3])
gimbal_lower_left_b[4] = mathutils.Euler((1.87361, -19.7847, 2.50277), 'XYZ')
print ("b4.gimbal.lower.left =", gimbal_lower_left_b[4])
gimbal_lower_left_o[4] = mathutils.Euler((1.77335, -8.4289, 1.81649), 'XYZ')
print ("o4.gimbal.lower.left =", gimbal_lower_left_o[4])
gimbal_lower_left_b[5] = mathutils.Euler((5.14961, -19.1839, 1.81649), 'XYZ')
print ("b5.gimbal.lower.left =", gimbal_lower_left_b[5])
gimbal_lower_left_y[5] = mathutils.Euler((2.33473, -27.4476, 1.81649), 'XYZ')
print ("y5.gimbal.lower.left =", gimbal_lower_left_y[5])
gimbal_lower_left_y[6] = mathutils.Euler((2.91482, -27.429, 1.81649), 'XYZ')
print ("y6.gimbal.lower.left =", gimbal_lower_left_y[6])
lower_right_o[3] = mathutils.Euler((-2.89871, -8.60219, 1.75624), 'XYZ')
print ("o3.lower.right =", lower_right_o[3])
lower_right_b[4] = mathutils.Euler((-2.89871, -19.3735, 5.04104), 'XYZ')
print ("b4.lower.right =", lower_right_b[4])
lower_right_y[4] = mathutils.Euler((-2.89871, -27.5528, 1.98242), 'XYZ')
print ("y4.lower.right =", lower_right_y[4])
lower_right_y[5] = mathutils.Euler((-2.89871, -29.1751, 4.22026), 'XYZ')
print ("y5.lower.right =", lower_right_y[5])
gimbal_lower_right_o[3] = mathutils.Euler((-3.01028, -8.51839, 2.50277), 'XYZ')
print ("o3.gimbal.lower.right =", gimbal_lower_right_o[3])
gimbal_lower_right_b[4] = mathutils.Euler((-3.01028, -19.7726, 2.50277), 'XYZ')
print ("b4.gimbal.lower.right =", gimbal_lower_right_b[4])
gimbal_lower_right_o[4] = mathutils.Euler((-2.89871, -8.60219, 1.75624), 'XYZ')
print ("o4.gimbal.lower.right =", gimbal_lower_right_o[4])
gimbal_lower_right_b[5] = mathutils.Euler((-6.82237, -19.1528, 1.75624), 'XYZ')
print ("b5.gimbal.lower.right =", gimbal_lower_right_b[5])
gimbal_lower_right_y[5] = mathutils.Euler((-4.35508, -27.5285, 1.75624), 'XYZ')
print ("y5.gimbal.lower.right =", gimbal_lower_right_y[5])
gimbal_lower_right_y[6] = mathutils.Euler((-4.88072, -27.5137, 1.75624), 'XYZ')
print ("y6.gimbal.lower.right =", gimbal_lower_right_y[6])
upper_b[2] = mathutils.Euler((0.510293, 5.22315, 0), 'XYZ')
print ("b2.upper =", upper_b[2])
upper_o[2] = mathutils.Euler((-1.65578, 4.62023, 0), 'XYZ')
print ("o2.upper =", upper_o[2])
upper_y[2] = mathutils.Euler((-1.56747, 4.00093, 0), 'XYZ')
print ("y2.upper =", upper_y[2])
upper_w[3] = mathutils.Euler((-1.56747, 4.00093, 9.05079), 'XYZ')
print ("w3.upper =", upper_w[3])
upper_w[4] = mathutils.Euler((-1.65459, 3.99465, 9.05079), 'XYZ')
print ("w4.upper =", upper_w[4])
upper_w[5] = mathutils.Euler((-1.65459, 3.99465, 1.61675), 'XYZ')
print ("w5.upper =", upper_w[5])
upper_y[3] = mathutils.Euler((-1.65459, 4.6204, 0), 'XYZ')
print ("y3.upper.left =", upper_y[3])
upper_w[2] = mathutils.Euler((-1.65459, 4.6204, 9.05079), 'XYZ')
print ("w2.upper =", upper_w[2])
upper_o[3] = mathutils.Euler((-2.07892, 9.71201, 0), 'XYZ')
print ("o3.upper =", upper_o[3])
upper_w[1] = mathutils.Euler((-2.07852, 9.71278, 0.712845), 'XYZ')
print ("w1.upper =", upper_w[1])
upper_b[4] = mathutils.Euler((-2.07852, 10.4327, 0.669667), 'XYZ')
print ("o3.upper =", upper_o[3])
upper_left_y[3] = mathutils.Euler((-1.65578, 4.62023, 0), 'XYZ')
print ("y3.upper.left =", upper_left_y[3])
upper_left_b[3] = mathutils.Euler((1.84964, 4.81322, 0), 'XYZ')
print ("b3.upper.left =", upper_left_b[3])
upper_left_y[2] = mathutils.Euler((-1.56747, 3.99717, 0), 'XYZ')
print ("y2.upper.left =", upper_left_y[2])
upper_left_o[2] = mathutils.Euler((2.0891, 4.23926, 0), 'XYZ')
print ("o2.upper.left =", upper_left_o[2])
# upper_left_w[1] = mathutils.Euler((2.52833, 3.67482, 0), 'XYZ')
# print ("w1.upper.left =", upper_left_w[1])
# upper_left_w[2] = mathutils.Euler((1.49581, 3.75702, 2.28162), 'XYZ')
# print ("w2.upper.left =", upper_left_w[2])
# upper_left_w[3] = mathutils.Euler((0.480431, 3.83787, 4.52539), 'XYZ')
# print ("w3.upper.left =", upper_left_w[3])
# upper_left_w[4] = mathutils.Euler((-0.54352, 3.9194, 6.78809), 'XYZ')
# print ("w4.upper.left =", upper_left_w[4])
# upper_left_w[5] = mathutils.Euler((2.28863, 3.62687, 0), 'XYZ')
# print ("w5.upper.left =", upper_left_w[5])
# upper_left_w[6] = mathutils.Euler((2.28863, 3.62687, 4.96987), 'XYZ')
# print ("w6.upper.left =", upper_left_w[6])
upper_right_y[3] = mathutils.Euler((-1.65578, 4.62023, 0), 'XYZ')
print ("y3.upper.right =", upper_right_y[3])
upper_right_b[3] = mathutils.Euler((-5.25711, 3.84599, 0), 'XYZ')
print ("b3.upper.right =", upper_right_b[3])
upper_right_y[2] = mathutils.Euler((-1.56747, 3.99717, 0), 'XYZ')
print ("y2.upper.right =", upper_right_y[2])
upper_right_o[2] = mathutils.Euler((-5.17039, 3.22555, 0), 'XYZ')
print ("o2.upper.right =", upper_right_o[2])
# upper_right_w[1] = mathutils.Euler((-5.28344, 2.59559, 0), 'XYZ')
# print ("w1.upper.right =", upper_right_w[1])
# upper_right_w[2] = mathutils.Euler((-4.36226, 2.94397, 2.24368), 'XYZ')
# print ("w2.upper.right =", upper_right_w[2])
# upper_right_w[3] = mathutils.Euler((-3.42546, 3.29826, 4.52539), 'XYZ')
# print ("w3.upper.right =", upper_right_w[3])
# upper_right_w[4] = mathutils.Euler((-2.49646, 3.64959, 6.78809), 'XYZ')
# print ("w4.upper.right =", upper_right_w[4])
# upper_right_w[5] = mathutils.Euler((-5.37016, 2.58956, 0), 'XYZ')
# print ("w5.upper.right =", upper_right_w[5])
# upper_right_w[6] = mathutils.Euler((-5.37016, 2.58956, 4.94216), 'XYZ')
# print ("w6.upper.right =", upper_right_w[6])
def constructMovement(self, J, amt, rig, a, b, y, o,
upper_b, upper_y, upper_o, upper_w,
upper_left_b, upper_left_y, upper_left_o,
upper_right_b, upper_right_y, upper_right_o,
lower_b, lower_y, lower_o, lower_w,
lower_left_b, lower_left_y, lower_left_o,
lower_right_b, lower_right_y, lower_right_o,
gimbal_lower_left_o, gimbal_lower_left_b, gimbal_lower_left_y,
gimbal_lower_right_o, gimbal_lower_right_b, gimbal_lower_right_y):
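"""Create the torso bone hierarchy stage by stage (j = 1..5), set bone
rolls per branch, then add the IK constraints; the leg chains use the
gimbal bones as IK and pole targets.
"""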
# Linkages
aa = [[0 for i in range(3)] for j in range(3)] # Link α(i) - α(j)
ab = [[0 for i in range(3)] for j in range(3)] # Link α(i) - β(j)
ya = [[0 for i in range(3)] for j in range(3)] # Link γ(i) - α(j)
ao = [[0 for i in range(3)] for j in range(3)] # Link α(i) - δ(j)
by = [[0 for i in range(2)] for j in range(2)] # Link β(i) - γ(j)
upper_ob = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - β(j)
upper_yy = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - γ(j)
upper_by = [[0 for i in range(self.J)] for j in range(self.J)] # Link β(i) - γ(j)
upper_yo = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - δ(j)
upper_ow = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - ω(j)
upper_ww = [[0 for i in range(self.J)] for j in range(self.J)] # Link ω(i) - ω(j)
upper_left_ob = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - β(j)
upper_left_yy = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - γ(j)
upper_left_by = [[0 for i in range(self.J)] for j in range(self.J)] # Link β(i) - γ(j)
upper_left_yo = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - δ(j)
# upper_left_ow = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - ω(j)
# upper_left_ww = [[0 for i in range(self.J)] for j in range(self.J)] # Link ω(i) - ω(j)
upper_right_ob = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - β(j)
upper_right_yy = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - γ(j)
upper_right_by = [[0 for i in range(self.J)] for j in range(self.J)] # Link β(i) - γ(j)
upper_right_yo = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - δ(j)
# upper_right_ow = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - ω(j)
# upper_right_ww = [[0 for i in range(self.J)] for j in range(self.J)] # Link ω(i) - ω(j)
lower_ob = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - β(j)
lower_yy = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - γ(j)
lower_by = [[0 for i in range(self.J)] for j in range(self.J)] # Link β(i) - γ(j)
lower_yo = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - δ(j)
lower_yw = [[0 for i in range(2)] for j in range(self.J)] # Link γ(i) - ω(j)
lower_left_ob = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - β(j)
lower_left_yy = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - γ(j)
lower_left_by = [[0 for i in range(self.J)] for j in range(self.J)] # Link β(i) - γ(j)
lower_left_yo = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - δ(j)
lower_right_ob = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - β(j)
lower_right_yy = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - γ(j)
lower_right_by = [[0 for i in range(self.J)] for j in range(self.J)] # Link β(i) - γ(j)
lower_right_yo = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - δ(j)
gimbal_lower_left_yo = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - δ(j)
gimbal_lower_left_ob = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - β(j)
gimbal_lower_left_by = [[0 for i in range(self.J)] for j in range(self.J)] # Link β(i) - γ(j)
gimbal_lower_left_yy = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - γ(j)
gimbal_lower_right_yo = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - δ(j)
gimbal_lower_right_ob = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - β(j)
gimbal_lower_right_by = [[0 for i in range(self.J)] for j in range(self.J)] # Link β(i) - γ(j)
gimbal_lower_right_yy = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - γ(j)
# gimbal_upper_left_ow = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - ω(j)
# gimbal_upper_left_ww = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - ω(j)
# gimbal_upper_right_ow = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - ω(j)
# gimbal_upper_right_ww = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - ω(j)
rig.location = mathutils.Euler((0.0, 0.0, 0.0), 'XYZ')
rig.show_x_ray = True
amt.show_names = True
amt.draw_type = 'STICK'
# amt.draw_type = 'BBONE'
# Link object to scene
scn = bpy.context.scene
scn.objects.link(rig)
scn.objects.active = rig
scn.update()
# Edit
bpy.ops.object.editmode_toggle()
j = 1
# Construction Linkage
aa[j+1][j] = amt.edit_bones.new('a'+ str(j+1)+'a'+ str(j))
aa[j+1][j].head = a[j+1]
aa[j+1][j].tail = a[j]
# aa[j+1][j].parent = by[j][j]_body
ab[j][j] = amt.edit_bones.new('a'+ str(j)+'b'+ str(j))
ab[j][j].head = a[j]
ab[j][j].tail = b[j]
ab[j][j].parent = aa[j+1][j]
by[j][j] = amt.edit_bones.new('b'+ str(j)+'y'+ str(j))
by[j][j].head = b[j]
by[j][j].tail = y[j]
by[j][j].parent = ab[j][j]
by[j][j].use_inherit_rotation = False
ya[j][j+1] = amt.edit_bones.new('y'+ str(j)+'a'+ str(j+1))
ya[j][j+1].head = y[j]
ya[j][j+1].tail = a[j+1]
ya[j][j+1].parent = by[j][j]
ao[j+1][j] = amt.edit_bones.new('a'+ str(j+1)+'o'+str(j))
ao[j+1][j].head = a[j+1]
ao[j+1][j].tail = o[j]
ao[j+1][j].parent = ya[j][j+1]
lower_ob[j][j+1] = amt.edit_bones.new('o'+ str(j)+'b'+ str(j+1)+'.lower')
lower_ob[j][j+1].head = o[j]
lower_ob[j][j+1].tail = lower_b[j+1]
lower_ob[j][j+1].parent = ao[j+1][j]
lower_yy[j][j+1] = amt.edit_bones.new('y'+ str(j)+'y'+ str(j+1)+'.lower')
lower_yy[j][j+1].head = y[j]
lower_yy[j][j+1].tail = lower_y[j+1]
lower_yy[j][j+1].parent = by[j][j]
upper_ob[j][j+1] = amt.edit_bones.new('o'+ str(j)+'b'+ str(j+1)+'.upper')
upper_ob[j][j+1].head = o[j]
upper_ob[j][j+1].tail = upper_b[j+1]
upper_ob[j][j+1].parent = ao[j+1][j]
upper_yy[j][j+1] = amt.edit_bones.new('y'+ str(j)+'y'+ str(j+1)+'.upper')
upper_yy[j][j+1].head = y[j]
upper_yy[j][j+1].tail = upper_y[j+1]
upper_yy[j][j+1].parent = by[j][j]
j = 2
lower_by[j][j] = amt.edit_bones.new('b'+ str(j) + 'y'+ str(j) + '.lower')
lower_by[j][j].head = lower_b[j]
lower_by[j][j].tail = lower_y[j]
lower_by[j][j].parent = lower_ob[j-1][j]
lower_yy[j][j+1] = amt.edit_bones.new('y'+ str(j) + 'y'+ str(j+1) + '.lower')
lower_yy[j][j+1].head = lower_y[j]
lower_yy[j][j+1].tail = lower_y[j+1]
lower_yy[j][j+1].parent = lower_by[j][j]
lower_yo[j][j] = amt.edit_bones.new('y'+ str(j) + 'o'+ str(j) + '.lower')
lower_yo[j][j].head = lower_y[j]
lower_yo[j][j].tail = lower_o[j]
lower_yo[j][j].parent = lower_yy[j-1][j]
upper_by[j][j] = amt.edit_bones.new('b'+ str(j) + 'y'+ str(j) + '.upper')
upper_by[j][j].head = upper_b[j]
upper_by[j][j].tail = upper_y[j]
upper_by[j][j].parent = upper_ob[j-1][j]
upper_yy[j][j+1] = amt.edit_bones.new('y'+ str(j) + 'y'+ str(j+1) + '.upper')
upper_yy[j][j+1].head = upper_y[j]
upper_yy[j][j+1].tail = upper_y[j+1]
upper_yy[j][j+1].parent = upper_by[j][j]
# upper w2w3
# upper_ww[j][j+1] = amt.edit_bones.new('w'+ str(j) + 'w'+ str(j+1) + '.upper')
# upper_ww[j][j+1].head = upper_w[j]
# upper_ww[j][j+1].tail = upper_w[j+1]
## upper_ww[j][j+1].parent = upper_yy[j][j+1]
# left shoulder gimbal
upper_left_yo[j][j] = amt.edit_bones.new('y'+ str(j) + 'o'+ str(j) + '.upper.left')
upper_left_yo[j][j].head = upper_y[j]
upper_left_yo[j][j].tail = upper_left_o[j]
upper_left_yo[j][j].parent = upper_yy[j-1][j]
upper_left_ob[j][j+1] = amt.edit_bones.new('o' + str(j) + 'b'+ str(j+1) + '.upper.left')
upper_left_ob[j][j+1].head = upper_left_o[j]
upper_left_ob[j][j+1].tail = upper_left_b[j+1]
upper_left_ob[j][j+1].parent = upper_left_yo[j][j]
# gimbal o2w5
# gimbal_upper_left_ow[j][j+3] = amt.edit_bones.new('o' + str(j) + 'w'+ str(j+3) + '.gimbal.upper.left')
# gimbal_upper_left_ow[j][j+3].head = upper_left_o[j]
# gimbal_upper_left_ow[j][j+3].tail = upper_left_w[j+3]
# gimbal_upper_left_ow[j][j+3].parent = upper_left_ob[j][j+1]
# upper_left_ow[j][j-1] = amt.edit_bones.new('o' + str(j) + 'w'+ str(j-1) + '.upper.left')
# upper_left_ow[j][j-1].head = upper_left_o[j]
# upper_left_ow[j][j-1].tail = upper_left_w[j-1]
# upper_left_ow[j][j-1].parent = upper_left_ob[j][j+1]
# w1w3
# upper_left_ww[j-1][j+1] = amt.edit_bones.new('w' + str(j-1) + 'w'+ str(j+1) + '.upper.left')
# upper_left_ww[j-1][j+1].head = upper_left_w[j-1]
# upper_left_ww[j-1][j+1].tail = upper_left_w[j+1]
# upper_left_ww[j-1][j+1].parent = upper_left_ow[j][j-1]
# left w3w2
# upper_left_ww[j+1][j] = amt.edit_bones.new('w' + str(j+1) + 'w'+ str(j) + '.upper.left')
# upper_left_ww[j+1][j].head = upper_w[j+1]
# upper_left_ww[j+1][j].tail = upper_left_w[j]
# upper_left_ww[j+1][j].parent = upper_ww[j][j+1]
# left w2w1
# upper_left_ww[j][j-1] = amt.edit_bones.new('w' + str(j) + 'w'+ str(j-1) + '.upper.left')
# upper_left_ww[j][j-1].head = upper_left_w[j]
# upper_left_ww[j][j-1].tail = upper_left_w[j-1]
# upper_left_ww[j][j-1].parent = upper_left_ww[j+1][j]
# left gimbal w2w4
# gimbal_upper_left_ww[j][j+2] = amt.edit_bones.new('w' + str(j) + 'w'+ str(j+2) +'.gimbal.upper.left')
# gimbal_upper_left_ww[j][j+2].head = upper_w[j]
# gimbal_upper_left_ww[j][j+2].tail = upper_w[j+2]
# gimbal_upper_left_ww[j][j+2].parent = upper_ww[j][j+1]
# right shoulder gimbal
upper_right_yo[j][j] = amt.edit_bones.new('y'+ str(j) + 'o'+ str(j) + '.upper.right')
upper_right_yo[j][j].head = upper_y[j]
upper_right_yo[j][j].tail = upper_right_o[j]
upper_right_yo[j][j].parent = upper_yy[j-1][j]
upper_right_ob[j][j+1] = amt.edit_bones.new('o' + str(j) + 'b'+ str(j+1) + '.upper.right')
upper_right_ob[j][j+1].head = upper_right_o[j]
upper_right_ob[j][j+1].tail = upper_right_b[j+1]
upper_right_ob[j][j+1].parent = upper_right_yo[j][j]
# gimbal o2w5
# gimbal_upper_right_ow[j][j+3] = amt.edit_bones.new('o' + str(j) + 'w'+ str(j+3) + '.gimbal.upper.right')
# gimbal_upper_right_ow[j][j+3].head = upper_right_o[j]
# gimbal_upper_right_ow[j][j+3].tail = upper_right_w[j+3]
# gimbal_upper_right_ow[j][j+3].parent = upper_right_ob[j][j+1]
# upper_right_ow[j][j-1] = amt.edit_bones.new('o' + str(j) + 'w'+ str(j-1) + '.upper.right')
# upper_right_ow[j][j-1].head = upper_right_o[j]
# upper_right_ow[j][j-1].tail = upper_right_w[j-1]
# upper_right_ow[j][j-1].parent = upper_right_ob[j][j+1]
# w1w3
# upper_right_ww[j-1][j+1] = amt.edit_bones.new('w' + str(j-1) + 'w'+ str(j+1) + '.upper.right')
# upper_right_ww[j-1][j+1].head = upper_right_w[j-1]
# upper_right_ww[j-1][j+1].tail = upper_right_w[j+1]
# upper_right_ww[j-1][j+1].parent = upper_right_ow[j][j-1]
# right w3w2
# upper_right_ww[j+1][j] = amt.edit_bones.new('w' + str(j+1) + 'w'+ str(j) + '.upper.right')
# upper_right_ww[j+1][j].head = upper_w[j+1]
# upper_right_ww[j+1][j].tail = upper_right_w[j]
# upper_right_ww[j+1][j].parent = upper_ww[j][j+1]
# right w2w1
# upper_right_ww[j][j-1] = amt.edit_bones.new('w' + str(j) + 'w'+ str(j-1) + '.upper.right')
# upper_right_ww[j][j-1].head = upper_right_w[j]
# upper_right_ww[j][j-1].tail = upper_right_w[j-1]
# upper_right_ww[j][j-1].parent = upper_right_ww[j+1][j]
# right gimbal w2w4
# gimbal_upper_right_ww[j][j+2] = amt.edit_bones.new('w' + str(j) + 'w'+ str(j+2) +'.gimbal.upper.right')
# gimbal_upper_right_ww[j][j+2].head = upper_w[j]
# gimbal_upper_right_ww[j][j+2].tail = upper_w[j+2]
# gimbal_upper_right_ww[j][j+2].parent = upper_ww[j][j+1]
j = 3
lower_yw[j][1] = amt.edit_bones.new('y'+ str(j) + 'w'+ str(1) + '.lower')
lower_yw[j][1].head = lower_y[j]
lower_yw[j][1].tail = lower_w[1]
lower_yw[j][1].parent = lower_yy[2][j]
lower_left_yo[j][j] = amt.edit_bones.new('y'+ str(j) + 'o'+ str(j) + '.lower.left')
lower_left_yo[j][j].head = lower_w[1]
lower_left_yo[j][j].tail = lower_left_o[j]
lower_left_yo[j][j].parent = lower_yw[j][1]
lower_left_ob[j][j+1] = amt.edit_bones.new('o' + str(j) + 'b'+ str(j+1) + '.lower.left')
lower_left_ob[j][j+1].head = lower_left_o[j]
lower_left_ob[j][j+1].tail = lower_left_b[j+1]
lower_left_ob[j][j+1].parent = lower_left_yo[j][j]
lower_right_yo[j][j] = amt.edit_bones.new('y' + str(j) + 'o'+ str(j) +'.lower.right')
lower_right_yo[j][j].head = lower_w[1]
lower_right_yo[j][j].tail = lower_right_o[j]
lower_right_yo[j][j].parent = lower_yw[j][1]
lower_right_ob[j][j+1] = amt.edit_bones.new('o' + str(j) + 'b'+ str(j+1) +'.lower.right')
lower_right_ob[j][j+1].head = lower_right_o[j]
lower_right_ob[j][j+1].tail = lower_right_b[j+1]
lower_right_ob[j][j+1].parent = lower_right_yo[j][j]
# gimbal
gimbal_lower_left_yo[j][j] = amt.edit_bones.new('y'+ str(j) + 'o'+ str(j) + '.gimbal.lower.left')
gimbal_lower_left_yo[j][j].head = lower_w[1]
gimbal_lower_left_yo[j][j].tail = gimbal_lower_left_o[j]
gimbal_lower_left_yo[j][j].parent = lower_yw[j][1]
gimbal_lower_left_ob[j][j+1] = amt.edit_bones.new('o' + str(j) + 'b'+ str(j+1) + '.gimbal.lower.left')
gimbal_lower_left_ob[j][j+1].head = gimbal_lower_left_o[j]
gimbal_lower_left_ob[j][j+1].tail = gimbal_lower_left_b[j+1]
gimbal_lower_left_ob[j][j+1].parent = gimbal_lower_left_yo[j][j]
gimbal_lower_right_yo[j][j] = amt.edit_bones.new('y' + str(j) + 'o'+ str(j) +'.gimbal.lower.right')
gimbal_lower_right_yo[j][j].head = lower_w[1]
gimbal_lower_right_yo[j][j].tail = gimbal_lower_right_o[j]
gimbal_lower_right_yo[j][j].parent = lower_yw[j][1]
gimbal_lower_right_ob[j][j+1] = amt.edit_bones.new('o' + str(j) + 'b'+ str(j+1) +'.gimbal.lower.right')
gimbal_lower_right_ob[j][j+1].head = gimbal_lower_right_o[j]
gimbal_lower_right_ob[j][j+1].tail = gimbal_lower_right_b[j+1]
gimbal_lower_right_ob[j][j+1].parent = gimbal_lower_right_yo[j][j]
# end
upper_left_by[j][j] = amt.edit_bones.new('b'+ str(j) + 'y'+ str(j) + '.upper.left')
upper_left_by[j][j].head = upper_left_b[j]
upper_left_by[j][j].tail = upper_left_y[j]
upper_left_by[j][j].parent = upper_left_ob[j-1][j]
upper_right_by[j][j] = amt.edit_bones.new('b'+ str(j) + 'y'+ str(j) + '.upper.right')
upper_right_by[j][j].head = upper_right_b[j]
upper_right_by[j][j].tail = upper_right_y[j]
upper_right_by[j][j].parent = upper_right_ob[j-1][j]
upper_yo[j][j] = amt.edit_bones.new('y'+ str(j) + 'o'+ str(j) + '.upper')
upper_yo[j][j].head = upper_y[j]
upper_yo[j][j].tail = upper_o[j]
upper_yo[j][j].parent = upper_yy[j-1][j]
upper_ob[j][j+1] = amt.edit_bones.new('o' + str(j) + 'b'+ str(j+1) + '.upper')
upper_ob[j][j+1].head = upper_w[1]
upper_ob[j][j+1].tail = upper_b[j+1]
upper_ob[j][j+1].parent = upper_yo[j][j]
# upper_left_ww[j][j+1] = amt.edit_bones.new('w' + str(j) + 'w'+ str(j+1) + '.upper.left')
# upper_left_ww[j][j+1].head = upper_left_w[j]
# upper_left_ww[j][j+1].tail = upper_left_w[j+1]
# upper_left_ww[j][j+1].parent = upper_left_ww[j-2][j]
# upper_right_ww[j][j+1] = amt.edit_bones.new('w' + str(j) + 'w'+ str(j+1) + '.upper.right')
# upper_right_ww[j][j+1].head = upper_right_w[j]
# upper_right_ww[j][j+1].tail = upper_right_w[j+1]
# upper_right_ww[j][j+1].parent = upper_right_ww[j-2][j]
j = 4
lower_left_by[j][j] = amt.edit_bones.new('b' + str(j) + 'y' + str(j) +'.lower.left')
lower_left_by[j][j].head = lower_left_b[j]
lower_left_by[j][j].tail = lower_left_y[j]
lower_left_by[j][j].parent = lower_left_ob[j-1][j]
lower_left_yy[j][j+1] = amt.edit_bones.new('y' + str(j) + 'y' + str(j+1) +'.lower.left')
lower_left_yy[j][j+1].head = lower_left_y[j]
lower_left_yy[j][j+1].tail = lower_left_y[j+1]
lower_left_yy[j][j+1].parent = lower_left_by[j][j]
# gimbal o4b5
gimbal_lower_left_ob[j][j+1] = amt.edit_bones.new('o' + str(j) + 'b'+ str(j+1) + '.gimbal.lower.left')
gimbal_lower_left_ob[j][j+1].head = gimbal_lower_left_o[j]
gimbal_lower_left_ob[j][j+1].tail = gimbal_lower_left_b[j+1]
gimbal_lower_left_ob[j][j+1].parent = lower_left_yo[j-1][j-1]
# gimbal w4w5
# gimbal_upper_left_ww[j][j+1] = amt.edit_bones.new('w' + str(j) + 'w'+ str(j+1) +'.gimbal.upper.left')
# gimbal_upper_left_ww[j][j+1].head = upper_w[j]
# gimbal_upper_left_ww[j][j+1].tail = upper_w[j+1]
# gimbal_upper_left_ww[j][j+1].parent = gimbal_upper_left_ww[j-2][j]
lower_right_by[j][j] = amt.edit_bones.new('b' + str(j) + 'y' + str(j) +'.lower.right')
lower_right_by[j][j].head = lower_right_b[j]
lower_right_by[j][j].tail = lower_right_y[j]
lower_right_by[j][j].parent = lower_right_ob[j-1][j]
lower_right_yy[j][j+1] = amt.edit_bones.new('y' + str(j) + 'y' + str(j+1) +'.lower.right')
lower_right_yy[j][j+1].head = lower_right_y[j]
lower_right_yy[j][j+1].tail = lower_right_y[j+1]
lower_right_yy[j][j+1].parent = lower_right_by[j][j]
# gimbal o4b5
gimbal_lower_right_ob[j][j+1] = amt.edit_bones.new('o' + str(j) + 'b'+ str(j+1) + '.gimbal.lower.right')
gimbal_lower_right_ob[j][j+1].head = gimbal_lower_right_o[j]
gimbal_lower_right_ob[j][j+1].tail = gimbal_lower_right_b[j+1]
gimbal_lower_right_ob[j][j+1].parent = lower_right_yo[j-1][j-1]
# gimbal w4w5
# gimbal_upper_right_ww[j][j+1] = amt.edit_bones.new('w' + str(j) + 'w'+ str(j+1) +'.gimbal.upper.right')
# gimbal_upper_right_ww[j][j+1].head = upper_w[j]
# gimbal_upper_right_ww[j][j+1].tail = upper_w[j+1]
# gimbal_upper_right_ww[j][j+1].parent = gimbal_upper_right_ww[j-2][j]
j = 5
# gimbal b5y5
gimbal_lower_right_by[j][j] = amt.edit_bones.new('b' + str(j) + 'y'+ str(j) + '.gimbal.lower.right')
gimbal_lower_right_by[j][j].head = gimbal_lower_right_b[j]
gimbal_lower_right_by[j][j].tail = gimbal_lower_right_y[j]
gimbal_lower_right_by[j][j].parent = gimbal_lower_right_ob[j-1][j]
# gimbal y5y6
gimbal_lower_right_yy[j][j+1] = amt.edit_bones.new('y' + str(j) + 'y'+ str(j+1) + '.gimbal.lower.right')
gimbal_lower_right_yy[j][j+1].head = gimbal_lower_right_y[j]
gimbal_lower_right_yy[j][j+1].tail = gimbal_lower_right_y[j+1]
gimbal_lower_right_yy[j][j+1].parent = gimbal_lower_right_by[j][j]
# gimbal b5y5
gimbal_lower_left_by[j][j] = amt.edit_bones.new('b' + str(j) + 'y'+ str(j) + '.gimbal.lower.left')
gimbal_lower_left_by[j][j].head = gimbal_lower_left_b[j]
gimbal_lower_left_by[j][j].tail = gimbal_lower_left_y[j]
gimbal_lower_left_by[j][j].parent = gimbal_lower_left_ob[j-1][j]
# gimbal y5y6
gimbal_lower_left_yy[j][j+1] = amt.edit_bones.new('y' + str(j) + 'y'+ str(j+1) + '.gimbal.lower.left')
gimbal_lower_left_yy[j][j+1].head = gimbal_lower_left_y[j]
gimbal_lower_left_yy[j][j+1].tail = gimbal_lower_left_y[j+1]
gimbal_lower_left_yy[j][j+1].parent = gimbal_lower_left_by[j][j]
# gimbal w5w6
# gimbal_upper_left_ww[j][j+1] = amt.edit_bones.new('w' + str(j) + 'w'+ str(j+1) + '.gimbal.upper.left')
# gimbal_upper_left_ww[j][j+1].head = upper_left_w[j]
# gimbal_upper_left_ww[j][j+1].tail = upper_left_w[j+1]
# gimbal_upper_left_ww[j][j+1].parent = gimbal_upper_left_ow[j-3][j]
# gimbal_upper_right_ww[j][j+1] = amt.edit_bones.new('w' + str(j) + 'w'+ str(j+1) + '.gimbal.upper.right')
# gimbal_upper_right_ww[j][j+1].head = upper_right_w[j]
# gimbal_upper_right_ww[j][j+1].tail = upper_right_w[j+1]
# gimbal_upper_right_ww[j][j+1].parent = gimbal_upper_right_ow[j-3][j]
# select all bones
#bpy.ops.pose.select_all(action="SELECT")
for bone in amt.edit_bones:
bone.select = True
bpy.ops.armature.calculate_roll(type='GLOBAL_NEG_Z')
for bone in amt.edit_bones:
bone.select = False
amt.edit_bones["o3b4.lower.left"].select = True
amt.edit_bones["b4y4.lower.left"].select = True
amt.edit_bones["y4y5.lower.left"].select = True
bpy.ops.armature.calculate_roll(type='GLOBAL_NEG_X')
for bone in amt.edit_bones:
bone.select = False
amt.edit_bones["o3b4.lower.right"].select = True
amt.edit_bones["b4y4.lower.right"].select = True
amt.edit_bones["y4y5.lower.right"].select = True
bpy.ops.armature.calculate_roll(type='GLOBAL_POS_X')
# Bone constraints. Armature must be in pose mode.
bpy.ops.object.mode_set(mode='POSE')
# IK constraint
j = 1
cns = rig.pose.bones['y' +str(j) +'a' +str(j+1)].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'a'+str(j+1)+'a'+str(j)
cns.chain_count = 2
cns.use_stretch = False
j = 2
cns = rig.pose.bones['b'+str(j) +'y'+str(j)+'.lower'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'y'+str(j)+'o'+str(j)+'.lower'
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
cns = rig.pose.bones['b'+str(j) +'y'+str(j)+'.upper'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'y'+str(j)+'o'+str(j)+'.upper.left'
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
# cns = rig.pose.bones['w' +str(j+1) +'w' +str(j)+'.upper.left'].constraints.new('IK')
# cns.name = 'Ik'
# cns.target = rig
# cns.subtarget = 'w' +str(j-1) +'w' +str(j+1)+'.upper.left'
# cns.chain_count = 1
# cns.use_stretch = False
# cns = rig.pose.bones['w' +str(j+1) +'w' +str(j)+'.upper.right'].constraints.new('IK')
# cns.name = 'Ik'
# cns.target = rig
# cns.subtarget = 'w' +str(j-1) +'w' +str(j+1)+'.upper.right'
# cns.chain_count = 1
# cns.use_stretch = False
j = 3
cns = rig.pose.bones['b'+str(j) +'y'+str(j)+'.upper.left'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'y'+str(j)+'o'+str(j)+'.upper'
cns.chain_count = 2
cns.use_stretch = False
cns = rig.pose.bones['b'+str(j) +'y'+str(j)+'.upper.right'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'y'+str(j)+'o'+str(j)+'.upper'
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
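# From here the gimbal chains also get pole targets: the pole bone pins the
# IK plane, and pole_angle is 0 on one side and 180 degrees on the mirrored
# side so the paired chains bend apart (assumed intent).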
cns = rig.pose.bones['o'+str(j) +'b'+str(j+1)+'.gimbal.lower.left'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'b'+str(j+1)+'y'+str(j+1)+'.lower.left'
cns.pole_target = rig
cns.pole_subtarget = 'o'+str(j)+'b'+str(j+1)+'.lower.left'
cns.pole_angle = 0
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
cns = rig.pose.bones['o'+str(j) +'b'+str(j+1)+'.gimbal.lower.right'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'b'+str(j+1)+'y'+str(j+1)+'.lower.right'
cns.pole_target = rig
cns.pole_subtarget = 'o'+str(j)+'b'+str(j+1)+'.lower.right'
cns.pole_angle = math.radians(180)
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
j = 4
cns = rig.pose.bones['b'+str(j) +'y'+str(j)+'.lower.right'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'y'+str(j+1)+'y'+str(j+2)+'.gimbal.lower.right'
cns.pole_target = rig
cns.pole_subtarget = 'b'+str(j+1)+'y'+str(j+1)+'.gimbal.lower.right'
cns.pole_angle = math.radians(0)
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
cns = rig.pose.bones['b'+str(j) +'y'+str(j)+'.lower.left'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'y'+str(j+1)+'y'+str(j+2)+'.gimbal.lower.left'
cns.pole_target = rig
cns.pole_subtarget = 'b'+str(j+1)+'y'+str(j+1)+'.gimbal.lower.left'
cns.pole_angle = math.radians(180)
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
# j = 5
# cns = rig.pose.bones['w'+str(j) +'w'+str(j+1)+'.gimbal.upper.left'].constraints.new('IK')
# cns.name = 'Ik'
# cns.target = rig
# cns.subtarget = 'w'+str(j-2)+'w'+str(j-1)+'.upper.left'
# cns.pole_target = rig
# cns.pole_subtarget = 'w'+str(j-4)+'w'+str(j-2)+'.upper.left'
# cns.pole_angle = math.radians(90)
# cns.iterations = 500
# cns.chain_count = 2
# cns.use_stretch = False
# cns = rig.pose.bones['w'+str(j) +'w'+str(j+1)+'.gimbal.upper.right'].constraints.new('IK')
# cns.name = 'Ik'
# cns.target = rig
# cns.subtarget = 'w'+str(j-2)+'w'+str(j-1)+'.upper.right'
# cns.pole_target = rig
# cns.pole_subtarget = 'w'+str(j-4)+'w'+str(j-2)+'.upper.right'
# cns.pole_angle = math.radians(90)
# cns.iterations = 500
# cns.chain_count = 2
# cns.use_stretch = False
bpy.ops.object.mode_set(mode='OBJECT')
def configLink(self, A, J, helicity, rig, move, part):
bpy.ops.object.mode_set(mode='OBJECT')
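# configLink populates the scene with this rig's joint meshes: each template
# object in bpy.data.objects is copied, scaled by A, offset along Z, and
# renamed "<bone>.mesh.<move>.<part>.<helicity>" so the companion
# constructLink step can bind every mesh to its bone by name.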
Q = (0.18648+0.146446)*A # vertical pitch between stacked joint meshes (model-specific constants, scaled by A)
# Z = -Q*2
Z = 0.0 # extra global Z offset for this link set (currently disabled)
n = 1
obj_joint = bpy.data.objects["joint.gold.body.E"].copy()
obj_joint.location = (0.0, 0.0, -Q*3+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = 'a'+ str(n+1)+'a'+ str(n)+ ".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.001"].copy()
obj_joint.location = (0.0, 0.0, -Q*2+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = 'a'+ str(n)+'b'+ str(n)+ ".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.green.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*((n+1) % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "b"+str(n)+"y"+str(n)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.silver.001"].copy()
obj_joint.location = (0.0, 0.0, +Q+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = 'y'+ str(n)+'a'+ str(n+1)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.copper.001"].copy()
obj_joint.location = (0.0, 0.0, +Q*3+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = 'a'+ str(n+1)+'o'+ str(n)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.001"].copy()
obj_joint.location = (0.0, 0.0, -Q*2 + Q*(n % 2)*6 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "o"+str(n)+"b"+str(n+1)+".lower.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.gold.00"+str(1 + (n+1) % 2)].copy()
obj_joint.location = (0.0, 0.0, +Q*(1 - (n % 2))*2+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"y"+str(n+1)+".lower.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.001"].copy()
obj_joint.location = (0.0, 0.0, -Q*2 + Q*(n % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "o"+str(n)+"b"+str(n+1)+".upper.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.gold.00"+str(1 + (n+1) % 2)].copy()
obj_joint.location = (0.0, 0.0, +Q*(1 - (n % 2))*2+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"y"+str(n+1)+".upper.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
n = 2
obj_joint = bpy.data.objects["joint.green.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*((n+1) % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "b"+str(n)+"y"+str(n)+".lower.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.gold.00"+str(1 + (n+1) % 2)].copy()
obj_joint.location = (0.0, 0.0, +Q*(1 - (n % 2))*2+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"y"+str(n+1)+".lower.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.copper.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*(n % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"o"+str(n)+".lower.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.green.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*((n+1) % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "b"+str(n)+"y"+str(n)+".upper.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.gold.00"+str(1 + (n+1) % 2)].copy()
obj_joint.location = (0.0, 0.0, +Q*(1 - (n % 2))*2+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"y"+str(n+1)+".upper.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.copper.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*(n % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"o"+str(n)+".upper.left.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "o"+str(n)+"b"+str(n+1)+".upper.left.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.copper.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*(n % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"o"+str(n)+".upper.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "o"+str(n)+"b"+str(n+1)+".upper.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
# obj_joint = bpy.data.objects["joint.gold.B.R"].copy()
# obj_joint.location = (0.0, 0.0, 0.0)
# obj_joint.scale = (A, A, A)
# obj_joint.name = "o"+str(n)+"w"+str(n-1)+".upper.left.mesh." + move + '.' + part +'.' + helicity
# bpy.context.scene.objects.link(obj_joint)
# obj_joint = bpy.data.objects["joint.cursor"].copy()
# obj_joint.location = (0.0, 0.0, 0.0)
# obj_joint.scale = (A, A, A)
# obj_joint.name = "w"+str(n-1)+"w"+str(n+1)+".upper.left.mesh." + move + '.' + part +'.' + helicity
# bpy.context.scene.objects.link(obj_joint)
# obj_joint = bpy.data.objects["joint.cursor"].copy()
# obj_joint.location = (0.0, 0.0, 0.0)
# obj_joint.scale = (A, A, A)
# obj_joint.name = "w"+str(n+1)+"w"+str(n+2)+".upper.left.mesh." + move + '.' + part +'.' + helicity
# bpy.context.scene.objects.link(obj_joint)
# w2w5.gimbal
# obj_joint = bpy.data.objects["joint.gold.g1.y.C.R2"].copy()
# obj_joint.location = (0.0, 0.0, 0.0)
# obj_joint.scale = (A, A, A)
# obj_joint.name = "o"+str(n)+"w"+str(n+3)+".gimbal.upper.left.mesh." + move + '.' + part +'.' + helicity
# bpy.context.scene.objects.link(obj_joint)
# w5w6.gimbal
# obj_joint = bpy.data.objects["joint.silver.g1.z.C.R2"].copy()
# obj_joint.location = (0.0, 0.0, 0.0)
# obj_joint.scale = (A, A, A)
# obj_joint.name = "w"+str(n+3)+"w"+str(n+4)+".gimbal.upper.left.mesh." + move + '.' + part +'.' + helicity
# bpy.context.scene.objects.link(obj_joint)
# obj_joint = bpy.data.objects["joint.gold.B.L"].copy()
# obj_joint.location = (0.0, 0.0, 0.0)
# obj_joint.scale = (A, A, A)
# obj_joint.name = "o"+str(n)+"w"+str(n-1)+".upper.right.mesh." + move + '.' + part +'.' + helicity
# bpy.context.scene.objects.link(obj_joint)
# obj_joint = bpy.data.objects["joint.cursor"].copy()
# obj_joint.location = (0.0, 0.0, 0.0)
# obj_joint.scale = (A, A, A)
# obj_joint.name = "w"+str(n-1)+"w"+str(n+1)+".upper.right.mesh." + move + '.' + part +'.' + helicity
# bpy.context.scene.objects.link(obj_joint)
# obj_joint = bpy.data.objects["joint.cursor"].copy()
# obj_joint.location = (0.0, 0.0, 0.0)
# obj_joint.scale = (A, A, A)
# obj_joint.name = "w"+str(n+1)+"w"+str(n+2)+".upper.right.mesh." + move + '.' + part +'.' + helicity
# bpy.context.scene.objects.link(obj_joint)
# w2w5.gimbal
# obj_joint = bpy.data.objects["joint.gold.g1.y.C.L2"].copy()
# obj_joint.location = (0.0, 0.0, 0.0)
# obj_joint.scale = (A, A, A)
# obj_joint.name = "o"+str(n)+"w"+str(n+3)+".gimbal.upper.right.mesh." + move + '.' + part +'.' + helicity
# bpy.context.scene.objects.link(obj_joint)
# w5w6.gimbal
# obj_joint = bpy.data.objects["joint.silver.g1.z.C.L2"].copy()
# obj_joint.location = (0.0, 0.0, 0.0)
# obj_joint.scale = (A, A, A)
# obj_joint.name = "w"+str(n+3)+"w"+str(n+4)+".gimbal.upper.right.mesh." + move + '.' + part +'.' + helicity
# bpy.context.scene.objects.link(obj_joint)
# w2w3.upper
# obj_joint = bpy.data.objects["joint.gold.B2"].copy()
# obj_joint.location = (0.0, 0.0, 0.0)
# obj_joint.scale = (A, A, A)
# obj_joint.name = "w"+str(n)+"w"+str(n+1)+".upper.mesh." + move + '.' + part +'.' + helicity
# bpy.context.scene.objects.link(obj_joint)
# w2w4.gimbal.upper.left
# obj_joint = bpy.data.objects["joint.gold.g1.y.C.L"].copy()
# obj_joint.location = (0.0, 0.0, 0.0)
# obj_joint.scale = (A, A, A)
# obj_joint.name = "w"+str(n)+"w"+str(n+2)+".gimbal.upper.left.mesh." + move + '.' + part +'.' + helicity
# bpy.context.scene.objects.link(obj_joint)
# w4w5.gimbal.upper.left
# obj_joint = bpy.data.objects["joint.silver.g1.z.C.L"].copy()
# obj_joint.location = (0.0, 0.0, 0.0)
# obj_joint.scale = (A, A, A)
# obj_joint.name = "w"+str(n+2)+"w"+str(n+3)+".gimbal.upper.left.mesh." + move + '.' + part +'.' + helicity
# bpy.context.scene.objects.link(obj_joint)
# w3w2.upper.left
# obj_joint = bpy.data.objects["joint.cursor"].copy()
# obj_joint.location = (0.0, 0.0, 0.0)
# obj_joint.scale = (A, A, A)
# obj_joint.name = "w"+str(n+1)+"w"+str(n)+".upper.left.mesh." + move + '.' + part +'.' + helicity
# bpy.context.scene.objects.link(obj_joint)
# w2w1.upper.left
# obj_joint = bpy.data.objects["joint.cursor"].copy()
# obj_joint.location = (0.0, 0.0, 0.0)
# obj_joint.scale = (A, A, A)
# obj_joint.name = "w"+str(n)+"w"+str(n-1)+".upper.left.mesh." + move + '.' + part +'.' + helicity
# bpy.context.scene.objects.link(obj_joint)
# w2w4.gimbal.upper.right
# obj_joint = bpy.data.objects["joint.gold.g1.y.C.R"].copy()
# obj_joint.location = (0.0, 0.0, 0.0)
# obj_joint.scale = (A, A, A)
# obj_joint.name = "w"+str(n)+"w"+str(n+2)+".gimbal.upper.right.mesh." + move + '.' + part +'.' + helicity
# bpy.context.scene.objects.link(obj_joint)
# w4w5.gimbal.upper.right
# obj_joint = bpy.data.objects["joint.silver.g1.z.C.R"].copy()
# obj_joint.location = (0.0, 0.0, 0.0)
# obj_joint.scale = (A, A, A)
# obj_joint.name = "w"+str(n+2)+"w"+str(n+3)+".gimbal.upper.right.mesh." + move + '.' + part +'.' + helicity
# bpy.context.scene.objects.link(obj_joint)
# w3w2.upper.right
# obj_joint = bpy.data.objects["joint.cursor"].copy()
# obj_joint.location = (0.0, 0.0, 0.0)
# obj_joint.scale = (A, A, A)
# obj_joint.name = "w"+str(n+1)+"w"+str(n)+".upper.right.mesh." + move + '.' + part +'.' + helicity
# bpy.context.scene.objects.link(obj_joint)
# w2w1.upper.right
# obj_joint = bpy.data.objects["joint.cursor"].copy()
# obj_joint.location = (0.0, 0.0, 0.0)
# obj_joint.scale = (A, A, A)
# obj_joint.name = "w"+str(n)+"w"+str(n-1)+".upper.right.mesh." + move + '.' + part +'.' + helicity
# bpy.context.scene.objects.link(obj_joint)
n = 3
obj_joint = bpy.data.objects["joint.gold.A"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"w"+str(1)+".lower.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.copper.A"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"o"+str(n)+".lower.left.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.B"].copy()
# obj_joint.location = (0.0, 0.0, -Q*2 + Q*(n % 2)*6 +Z)
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "o"+str(n)+"b"+str(n+1)+".lower.left.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.copper.A"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"o"+str(n)+".lower.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.B"].copy()
# obj_joint.location = (0.0, 0.0, -Q*2 + Q*(n % 2)*6 +Z)
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "o"+str(n)+"b"+str(n+1)+".lower.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.copper.g1.y.B"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"o"+str(n)+".gimbal.lower.left.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.g1.z.B"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "o"+str(n)+"b"+str(n+1)+".gimbal.lower.left.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.copper.g1.y.B"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"o"+str(n)+".gimbal.lower.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.g1.z.B"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "o"+str(n)+"b"+str(n+1)+".gimbal.lower.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.green.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*((n+1) % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "b"+str(n)+"y"+str(n)+".upper.left.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.green.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*((n+1) % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "b"+str(n)+"y"+str(n)+".upper.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.copper.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*(n % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"o"+str(n)+".upper.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.001"].copy()
obj_joint.location = (0.0, 0.0, -Q*2 + Q*(n % 2)*6 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "o"+str(n)+"b"+str(n+1)+".upper.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
n = 4
obj_joint = bpy.data.objects["joint.green.leg-left.A"].copy()
# obj_joint.location = (0.0, 0.0, Q +Z)
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "b"+str(n)+"y"+str(n)+".lower.left.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.cursor"].copy()
obj_joint.location = (0.0, 0.0, Q/2 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"y"+str(n+1)+".lower.left.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.green.leg-right.A"].copy()
# obj_joint.location = (0.0, 0.0, Q +Z)
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "b"+str(n)+"y"+str(n)+".lower.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.cursor"].copy()
obj_joint.location = (0.0, 0.0, Q/2 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"y"+str(n+1)+".lower.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.cursor"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "o"+str(n)+"b"+str(n+1)+".gimbal.lower.left.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.cursor"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "o"+str(n)+"b"+str(n+1)+".gimbal.lower.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
n = 5
obj_joint = bpy.data.objects["joint.cursor"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "b"+str(n)+"y"+str(n)+".gimbal.lower.left.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.cursor"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "b"+str(n)+"y"+str(n)+".gimbal.lower.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.cursor"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"y"+str(n+1)+".gimbal.lower.left.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.cursor"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"y"+str(n+1)+".gimbal.lower.right.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
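# Select every newly linked "*.mesh.*" object, make each copy single-user so
# editing one joint mesh cannot affect its siblings, then reset all origins
# to the cursor at the world origin so bone-space offsets stay consistent.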
for ob in bpy.context.scene.objects:
if "mesh" in ob.name:
ob.select = True
bpy.ops.object.make_single_user(type='SELECTED_OBJECTS', object=True, obdata=True, material=True, texture=True, animation=True)
bpy.context.scene.cursor_location = (0.0, 0.0, 0.0)
bpy.ops.object.origin_set(type='ORIGIN_CURSOR')
# Parent the arms to the body
def setParent(self, helicity, move, rig,
arm_left_loc, arm_left_rot, arm_left,
arm_right_loc, arm_right_rot, arm_right):
bpy.ops.object.mode_set(mode='OBJECT')
bpy.context.scene.frame_current = 0
# arm left
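# Bone-parenting idiom: activate the target bone in edit mode, return to
# object mode, select the child rig and then the parent rig (active last),
# and call parent_set(type='BONE', keep_transform=True) so the arm follows
# that single bone without jumping.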
bpy.ops.object.select_all(action='DESELECT')
rig.select = True
bpy.context.scene.objects.active = rig
bpy.ops.object.editmode_toggle()
## parent_bone = 'o2b3.upper.left' # alternative parent bone
# parent_bone = 'o2w5.gimbal.upper.left' # alternative parent bone
parent_bone = 'o2b3.upper.left' # the bone that will act as the parent
rig.data.edit_bones.active = rig.data.edit_bones[parent_bone]
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='DESELECT') #deselect all objects
arm_left.rig.select = True
rig.select = True
bpy.context.scene.objects.active = rig # the active object becomes the parent of all selected objects
bpy.ops.object.parent_set(type='BONE', keep_transform=True)
# arm left end
# arm right
bpy.ops.object.select_all(action='DESELECT') #deselect all objects
rig.select = True
bpy.context.scene.objects.active = rig
bpy.ops.object.editmode_toggle()
## parent_bone = 'o2b3.upper.right' # alternative parent bone
# parent_bone = 'o2w5.gimbal.upper.right' # alternative parent bone
parent_bone = 'o2b3.upper.right' # the bone that will act as the parent
rig.data.edit_bones.active = rig.data.edit_bones[parent_bone]
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='DESELECT') #deselect all objects
arm_right.rig.select = True
rig.select = True
bpy.context.scene.objects.active = rig # the active object becomes the parent of all selected objects
bpy.ops.object.parent_set(type='BONE', keep_transform=True)
# arm right end
bpy.ops.object.select_all(action='DESELECT') #deselect all objects
# arms position
arm_left.rig.location = arm_left_loc
arm_left.rig.rotation_euler = arm_left_rot
arm_right.rig.location = arm_right_loc
arm_right.rig.rotation_euler = arm_right_rot
### grab the long pole with the right hand
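# Implemented below as mutual Damped Track constraints: each hand bone aims
# at a bone on the opposite arm, so the grip stays closed however the arms
# move (assumed intent of the note above).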
arm_left.rig.select = True
# Bone constraints. Armature must be in pose mode.
bpy.ops.object.mode_set(mode='POSE')
# Damped Track: the left hand bone y5w1 aims at the right arm's y3y4
cns = arm_left.rig.pose.bones['y5w1'].constraints.new('DAMPED_TRACK')
cns.name = 'Damped Track'
cns.target = arm_right.rig
cns.subtarget = 'y3y4'
cns.head_tail = 1 # track the tail of the target bone
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='DESELECT') #deselect all objects
arm_right.rig.select = True
# Bone constraints. Armature must be in pose mode.
bpy.ops.object.mode_set(mode='POSE')
# Damped Track: the right hand bone y4w1 aims at the left arm's y4y5
cns = arm_right.rig.pose.bones['y4w1'].constraints.new('DAMPED_TRACK')
cns.name = 'Damped Track'
cns.target = arm_left.rig
cns.subtarget = 'y4y5'
cns.head_tail = 1 # track the tail of the target bone
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='DESELECT') #deselect all objects
class LeftArm(Formula):
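# Left arm: a 6-joint linkage that ends in wrist helper joints (w) plus a
# duplicated "gimbal" chain used below to keep the hand oriented via IK.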
J = 6 # number of joints
# Overriding
def __init__(self, P, A, move, part, helicity, start, end):
global interval
global frame_start
global frame_end
self.interval = interval
self.frame_start = frame_start
self.frame_end = frame_end
# pivot factor
self.P = P
# scale factor
self.A = A
# name
self.move = move
# element
self.part = part
# element helicity
self.helicity = helicity
self.start = start
self.end = end
bpy.ops.object.mode_set(mode='OBJECT')
# Create armature and object
self.amt = bpy.data.armatures.new(move + '.' + part + '.' + helicity + '.data')
self.rig = bpy.data.objects.new(move + '.' + part + '.' + helicity, self.amt)
# Joints
self.a = [0 for i in range(4)] # Joint α
self.b = [0 for i in range(self.J)] # Joint β
self.y = [0 for i in range(self.J+1)] # Joint γ
self.o = [0 for i in range(self.J)] # Joint δ
self.w = [0 for i in range(self.J)] # Joint ω
# Configuration Movement
self.configMovement(self.P, self.A, self.J, self.a, self.b, self.y, self.o, self.w)
# Construction Movement
self.constructMovement(self.J, self.helicity, self.amt, self.rig, self.a, self.b, self.y, self.o, self.w)
# Construction Rotation
self.configRotation(self.rig, self.interval, self.frame_start, self.frame_end, self.start, self.end)
# Configuration Linkage
self.configLink(self.A, self.J, self.helicity, self.rig, self.move, self.part)
# Construction Linkage
self.constructLink(self.A, self.J, self.helicity, self.rig, self.move, self.part)
# Overriding Configuration Movement
def configMovement(self, P, A, J, a, b, y, o, w):
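# Joint layout. mathutils.Euler is used here simply as a 3-value container
# for positions (edit-bone head/tail accept any 3-sequence); all joints sit
# in the XY plane except the w wrist helpers, which are pushed out along Z.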
a[1] = mathutils.Euler((P, A, 0), 'XYZ')
print ("a1 =", a[1])
a[2] = mathutils.Euler((A, -A, 0), 'XYZ')
print ("a2 =", a[2])
b[1] = mathutils.Euler((-A, A, 0), 'XYZ')
print ("b1 =", b[1])
o[1] = mathutils.Euler((A, A, 0), 'XYZ')
print ("o1 =", o[1])
B = A * 2 * sqrt (2)
C = B + (B * sqrt (2))
D = C * sqrt (2)
E = C + D
y[1] = mathutils.Euler((-A, -A, 0), 'XYZ')
print ("y1 =", y[1])
y[2] = mathutils.Euler((0.405432, -0.871271, 0), 'XYZ')
print ("y2 =", y[2])
b[2] = mathutils.Euler((1.10083, -0.097564, 0), 'XYZ')
print ("b2 =", b[2])
o[2] = mathutils.Euler((-1.08269, -0.547078, 0), 'XYZ')
print ("o2 =", o[2])
y[3] = mathutils.Euler((6.93556, 0.572048, 0), 'XYZ')
print ("y3 =", y[3])
b[3] = mathutils.Euler((6.51657, 0.095689, 0), 'XYZ')
print ("b3 =", b[3])
o[3] = mathutils.Euler((6.46188, 0.529683, 0), 'XYZ')
print ("o3 =", o[3])
y[4] = mathutils.Euler((11.8048, 3.1013, 0), 'XYZ')
print ("y4 =", y[4])
b[4] = mathutils.Euler((11.6423, 2.42679, 0), 'XYZ')
print ("b4 =", b[4])
o[4] = mathutils.Euler((11.63, 3.33272, 0), 'XYZ')
print ("o4 =", o[4])
y[5] = mathutils.Euler((13.5459, 4.00668, 0), 'XYZ')
print ("y5 =", y[5])
w[1] = mathutils.Euler((13.5459, 4.00668, 68.3593), 'XYZ')
print ("w1 =", w[1])
w[2] = mathutils.Euler((13.4455, 4.13703, 68.3593), 'XYZ')
print ("w2 =", w[2])
y[6] = mathutils.Euler((13.7673, 3.10131, 0), 'XYZ')
print ("y6 =", y[6])
w[3] = mathutils.Euler((13.7673, 3.10131, 68.3593), 'XYZ')
print ("w3 =", w[3])
def constructMovement(self, J, helicity, amt, rig, a, b, y, o, w):
# Linkages
aa = [[0 for i in range(4)] for j in range(4)] # Link α(i) - α(j)
ab = [[0 for i in range(4)] for j in range(4)] # Link α(i) - β(j)
ya = [[0 for i in range(4)] for j in range(4)] # Link γ(i) - α(j)
ao = [[0 for i in range(4)] for j in range(4)] # Link α(i) - δ(j)
ob = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - β(j)
yy = [[0 for i in range(self.J+1)] for j in range(self.J)] # Link γ(i) - γ(j)
by = [[0 for i in range(self.J)] for j in range(self.J)] # Link β(i) - γ(j)
yo = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - δ(j)
yw = [[0 for i in range(self.J)] for j in range(self.J+1)] # Link γ(i) - ω(j)
ww = [[0 for i in range(self.J)] for j in range(self.J)] # Link ω(i) - ω(j)
rig.location = mathutils.Euler((0.0, 0.0, 0.0), 'XYZ')
rig.show_x_ray = True
amt.show_names = True
amt.draw_type = 'STICK'
# amt.draw_type = 'BBONE'
# Link object to scene
scn = bpy.context.scene
scn.objects.link(rig)
scn.objects.active = rig
scn.update()
# Edit
bpy.ops.object.editmode_toggle()
# Construction Linkage
aa[2][1] = amt.edit_bones.new('a2a1')
aa[2][1].head = a[2]
aa[2][1].tail = a[1]
ab[1][1] = amt.edit_bones.new('a1b1')
ab[1][1].head = a[1]
ab[1][1].tail = b[1]
ab[1][1].parent = aa[2][1]
by[1][1] = amt.edit_bones.new('b1y1')
by[1][1].head = b[1]
by[1][1].tail = y[1]
by[1][1].parent = ab[1][1]
by[1][1].use_inherit_rotation = False
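# b1y1 is the input crank of the linkage; rotation inheritance is disabled
# so it can be driven independently of its parent (assumed intent).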
ya[1][2] = amt.edit_bones.new('y1a2')
ya[1][2].head = y[1]
ya[1][2].tail = a[2]
ya[1][2].parent = by[1][1]
ao[2][1] = amt.edit_bones.new('a2o1')
ao[2][1].head = a[2]
ao[2][1].tail = o[1]
ao[2][1].parent = ya[1][2]
ob[1][2] = amt.edit_bones.new('o1b2')
ob[1][2].head = o[1]
ob[1][2].tail = b[2]
ob[1][2].parent = ao[2][1]
yy[1][2] = amt.edit_bones.new('y1y2')
yy[1][2].head = y[1]
yy[1][2].tail = y[2]
yy[1][2].parent = by[1][1]
for j in range(2, J - 1):
by[j][j] = amt.edit_bones.new('b'+ str(j) + 'y'+ str(j))
by[j][j].head = b[j]
by[j][j].tail = y[j]
by[j][j].parent = ob[j-1][j]
yo[j][j] = amt.edit_bones.new('y'+ str(j) + 'o'+ str(j))
yo[j][j].head = y[j]
yo[j][j].tail = o[j]
yo[j][j].parent = yy[j-1][j]
yy[j][j+1] = amt.edit_bones.new('y'+ str(j) + 'y'+ str(j+1))
yy[j][j+1].head = y[j]
yy[j][j+1].tail = y[j+1]
yy[j][j+1].parent = by[j][j]
if j < (J-2):
ob[j][j+1] = amt.edit_bones.new('o'+ str(j) + 'b'+ str(j+1))
ob[j][j+1].head = o[j]
ob[j][j+1].tail = b[j+1]
ob[j][j+1].parent = yo[j][j]
yw[5][1] = amt.edit_bones.new('y5w1')
yw[5][1].head = y[5]
yw[5][1].tail = w[1]
yw[5][1].parent = yy[4][5]
ww[1][2] = amt.edit_bones.new('w1w2')
ww[1][2].head = w[1]
ww[1][2].tail = w[2]
ww[1][2].parent = yw[5][1]
yy[4][6] = amt.edit_bones.new('y4y6.gimbal')
yy[4][6].head = y[4]
yy[4][6].tail = y[6]
yy[4][6].parent = by[4][4]
yw[6][3] = amt.edit_bones.new('y6w3.gimbal')
yw[6][3].head = y[6]
yw[6][3].tail = w[3]
yw[6][3].parent = yy[4][6]
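# Wrist gimbal: y4y6 and y6w3 duplicate the end of the chain; the IK
# constraint below aims y6w3 at w1w2 with y5w1 as the pole, giving the hand
# a stable orientation frame.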
# select all edit bones
#bpy.ops.pose.select_all(action="SELECT")
for b in amt.edit_bones:
b.select = True
if helicity == 'right':
bpy.ops.armature.calculate_roll(type='GLOBAL_POS_Z')
else:
bpy.ops.armature.calculate_roll(type='GLOBAL_NEG_Z')
# Bone constraints. Armature must be in pose mode.
bpy.ops.object.mode_set(mode='POSE')
# IK constraint
cns = rig.pose.bones['y1a2'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'a2a1'
cns.chain_count = 2
cns.use_stretch = False
for j in range(2, J - 1):
cns = rig.pose.bones['b'+str(j) +'y'+str(j)].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'y'+str(j)+'o'+str(j)
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
cns = rig.pose.bones['y6w3.gimbal'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'w1w2'
cns.pole_target = rig
cns.pole_subtarget = 'y5w1'
cns.pole_angle = math.radians(90)
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
bpy.ops.object.mode_set(mode='OBJECT')
def configLink(self, A, J, helicity, rig, move, part):
bpy.ops.object.mode_set(mode='OBJECT')
Q = (0.18648+0.146446)*A
# Z = -Q*2
Z = 0.0
obj_joint = bpy.data.objects["joint.gold.arm-left.006"].copy()
obj_joint.location = (0.0, 0.0, -Q*3+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "a2a1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.silver.002"].copy()
obj_joint.location = (0.0, 0.0, +Q*(3+4)+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y1a2.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.copper.001"].copy()
obj_joint.location = (0.0, 0.0, +Q*(1+4)+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "a2o1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.001"].copy()
obj_joint.location = (0.0, 0.0, -Q*2+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "a1b1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
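# Template choice alternates with n: the (n % 2) terms flip the Z offset and
# select the ".001"/".002" template variant, so consecutive joints get
# mirrored mesh placements ("Pattern 1"/"Pattern 2" below).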
for n in range(1, J - 1):
if n <= (J-2):
# Pattern 2 of by
obj_joint = bpy.data.objects["joint.green.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*((n+1) % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "b"+str(n)+"y"+str(n)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
if n <= (J-3):
# Pattern 2 of yy
obj_joint = bpy.data.objects["joint.gold.00"+str(1 + (n+1) % 2)].copy()
obj_joint.location = (0.0, 0.0, +Q*(1 - (n % 2))*2+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"y"+str(n+1)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
if n <= (J-3):
# Pattern 1 of ob
obj_joint = bpy.data.objects["joint.blue.00"+str(1 + (n+1) % 2)].copy()
obj_joint.location = (0.0, 0.0, -Q*4 + Q*(n % 2)*8 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "o"+str(n)+"b"+str(n+1)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
# Pattern 2 of yo
obj_joint = bpy.data.objects["joint.copper.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*((n+1) % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n+1)+"o"+str(n+1)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.gold.y4y5.001"].copy()
obj_joint.location = (0.0, 0.0, +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y4y5.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["cursor.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "y5w1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["cursor.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "w1w2.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.green.y4y6.gimbal.001"].copy()
obj_joint.location = (0.0, 0.0, +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y4y6.gimbal.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.gold.y6w3.gimbal.001"].copy()
obj_joint.location = (0.0, 0.0, +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y6w3.gimbal.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
for ob in bpy.context.scene.objects:
if "mesh" in ob.name:
ob.select = True
bpy.ops.object.make_single_user(type='SELECTED_OBJECTS', object=True, obdata=True, material=True, texture=True, animation=True)
bpy.context.scene.cursor_location = (0.0, 0.0, 0.0)
bpy.ops.object.origin_set(type='ORIGIN_CURSOR')
class RightArm(Formula):
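# Right arm: the 5-joint variant of the same construction, with its own
# measured joint coordinates and a y3y5/y5w3 wrist gimbal.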
J = 5 # number of joints
# Overriding
def __init__(self, P, A, move, part, helicity, start, end):
global interval
global frame_start
global frame_end
self.interval = interval
self.frame_start = frame_start
self.frame_end = frame_end
# pivot factor
self.P = P
# scale factor
self.A = A
# name
self.move = move
# element
self.part = part
# element helicity
self.helicity = helicity
self.start = start
self.end = end
bpy.ops.object.mode_set(mode='OBJECT')
# Create armature and object
self.amt = bpy.data.armatures.new(move + '.' + part + '.' + helicity + '.data')
self.rig = bpy.data.objects.new(move + '.' + part + '.' + helicity, self.amt)
# Joints
self.a = [0 for i in range(4)] # Joint α
self.b = [0 for i in range(self.J)] # Joint β
self.y = [0 for i in range(self.J+1)] # Joint γ
self.o = [0 for i in range(self.J)] # Joint δ
self.w = [0 for i in range(self.J)] # Joint ω
# Configuration Movement
self.configMovement(self.P, self.A, self.J, self.a, self.b, self.y, self.o, self.w)
# Construction Movement
self.constructMovement(self.J, self.helicity, self.amt, self.rig, self.a, self.b, self.y, self.o, self.w)
# Construction Rotation
self.configRotation(self.rig, self.interval, self.frame_start, self.frame_end, self.start, self.end)
# Configuration Linkage
self.configLink(self.A, self.J, self.helicity, self.rig, self.move, self.part)
# Construction Linkage
self.constructLink(self.A, self.J, self.helicity, self.rig, self.move, self.part)
# Overriding Configuration Movement
def configMovement(self, P, A, J, a, b, y, o, w):
a[1] = mathutils.Euler((P, A, 0), 'XYZ')
print ("a1 =", a[1])
a[2] = mathutils.Euler((A, -A, 0), 'XYZ')
print ("a2 =", a[2])
b[1] = mathutils.Euler((-A, A, 0), 'XYZ')
print ("b1 =", b[1])
o[1] = mathutils.Euler((A, A, 0), 'XYZ')
print ("o1 =", o[1])
B = A * 2 * sqrt (2)
C = B + (B * sqrt (2))
D = C * sqrt (2)
E = C + D
y[1] = mathutils.Euler((-A, -A, 0), 'XYZ')
print ("y1 =", y[1])
y[2] = mathutils.Euler((5.23572, -4.00436, 0), 'XYZ')
print ("y2 =", y[2])
b[2] = mathutils.Euler((5.80363, -3.26182, 0), 'XYZ')
print ("b2 =", b[2])
o[2] = mathutils.Euler((4.64316, -3.47289, 0), 'XYZ')
print ("o2 =", o[2])
y[3] = mathutils.Euler((10.4745, -5.64019, 0), 'XYZ')
print ("y3 =", y[3])
b[3] = mathutils.Euler((9.87603, -5.84587, 0), 'XYZ')
print ("b3 =", b[3])
o[3] = mathutils.Euler((10.042, -5.44267, 0), 'XYZ')
print ("o3 =", o[3])
y[4] = mathutils.Euler((12.3316, -5.71339, 0), 'XYZ')
print ("y4 =", y[4])
w[1] = mathutils.Euler((12.3316, -5.71339, -1.0), 'XYZ')
print ("w1 =", w[1])
w[2] = mathutils.Euler((12.3316, -5.61339, -1.0), 'XYZ')
print ("w2 =", w[2])
y[5] = mathutils.Euler((12.333, -5.6402, 0), 'XYZ')
print ("y5 =", y[5])
w[3] = mathutils.Euler((12.333, -5.6402, -1.0), 'XYZ')
print ("w3 =", w[3])
def constructMovement(self, J, helicity, amt, rig, a, b, y, o, w):
# Linkages
aa = [[0 for i in range(4)] for j in range(4)] # Link α(i) - α(j)
ab = [[0 for i in range(4)] for j in range(4)] # Link α(i) - β(j)
ya = [[0 for i in range(4)] for j in range(4)] # Link γ(i) - α(j)
ao = [[0 for i in range(4)] for j in range(4)] # Link α(i) - δ(j)
ob = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - β(j)
yy = [[0 for i in range(self.J+1)] for j in range(self.J)] # Link γ(i) - γ(j)
by = [[0 for i in range(self.J)] for j in range(self.J)] # Link β(i) - γ(j)
yo = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - δ(j)
yw = [[0 for i in range(self.J)] for j in range(self.J+1)] # Link γ(i) - ω(j)
ww = [[0 for i in range(self.J)] for j in range(self.J)] # Link ω(i) - ω(j)
rig.location = mathutils.Euler((0.0, 0.0, 0.0), 'XYZ')
rig.show_x_ray = True
amt.show_names = True
amt.draw_type = 'STICK'
# amt.draw_type = 'BBONE'
# Link object to scene
scn = bpy.context.scene
scn.objects.link(rig)
scn.objects.active = rig
scn.update()
# Edit
bpy.ops.object.editmode_toggle()
# Construction Linkage
aa[2][1] = amt.edit_bones.new('a2a1')
aa[2][1].head = a[2]
aa[2][1].tail = a[1]
ab[1][1] = amt.edit_bones.new('a1b1')
ab[1][1].head = a[1]
ab[1][1].tail = b[1]
ab[1][1].parent = aa[2][1]
by[1][1] = amt.edit_bones.new('b1y1')
by[1][1].head = b[1]
by[1][1].tail = y[1]
by[1][1].parent = ab[1][1]
by[1][1].use_inherit_rotation = False
ya[1][2] = amt.edit_bones.new('y1a2')
ya[1][2].head = y[1]
ya[1][2].tail = a[2]
ya[1][2].parent = by[1][1]
ao[2][1] = amt.edit_bones.new('a2o1')
ao[2][1].head = a[2]
ao[2][1].tail = o[1]
ao[2][1].parent = ya[1][2]
ob[1][2] = amt.edit_bones.new('o1b2')
ob[1][2].head = o[1]
ob[1][2].tail = b[2]
ob[1][2].parent = ao[2][1]
yy[1][2] = amt.edit_bones.new('y1y2')
yy[1][2].head = y[1]
yy[1][2].tail = y[2]
yy[1][2].parent = by[1][1]
for j in range(2, J - 1):
by[j][j] = amt.edit_bones.new('b'+ str(j) + 'y'+ str(j))
by[j][j].head = b[j]
by[j][j].tail = y[j]
by[j][j].parent = ob[j-1][j]
yo[j][j] = amt.edit_bones.new('y'+ str(j) + 'o'+ str(j))
yo[j][j].head = y[j]
yo[j][j].tail = o[j]
yo[j][j].parent = yy[j-1][j]
yy[j][j+1] = amt.edit_bones.new('y'+ str(j) + 'y'+ str(j+1))
yy[j][j+1].head = y[j]
yy[j][j+1].tail = y[j+1]
yy[j][j+1].parent = by[j][j]
if j < (J-2):
ob[j][j+1] = amt.edit_bones.new('o'+ str(j) + 'b'+ str(j+1))
ob[j][j+1].head = o[j]
ob[j][j+1].tail = b[j+1]
ob[j][j+1].parent = yo[j][j]
yw[4][1] = amt.edit_bones.new('y4w1')
yw[4][1].head = y[4]
yw[4][1].tail = w[1]
yw[4][1].parent = yy[3][4]
ww[1][2] = amt.edit_bones.new('w1w2')
ww[1][2].head = w[1]
ww[1][2].tail = w[2]
ww[1][2].parent = yw[4][1]
yy[3][5] = amt.edit_bones.new('y3y5.gimbal')
yy[3][5].head = y[3]
yy[3][5].tail = y[5]
yy[3][5].parent = by[3][3]
yw[5][3] = amt.edit_bones.new('y5w3.gimbal')
yw[5][3].head = y[5]
yw[5][3].tail = w[3]
yw[5][3].parent = yy[3][5]
# select all edit bones
#bpy.ops.pose.select_all(action="SELECT")
for b in amt.edit_bones:
b.select = True
if helicity == 'right':
bpy.ops.armature.calculate_roll(type='GLOBAL_POS_Z')
else:
bpy.ops.armature.calculate_roll(type='GLOBAL_NEG_Z')
# Bone constraints. Armature must be in pose mode.
bpy.ops.object.mode_set(mode='POSE')
# IK constraint
cns = rig.pose.bones['y1a2'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'a2a1'
cns.chain_count = 2
cns.use_stretch = False
for j in range(2, J - 1):
cns = rig.pose.bones['b'+str(j) +'y'+str(j)].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'y'+str(j)+'o'+str(j)
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
cns = rig.pose.bones['y5w3.gimbal'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'w1w2'
cns.pole_target = rig
cns.pole_subtarget = 'y4w1'
cns.pole_angle = math.radians(90)
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
bpy.ops.object.mode_set(mode='OBJECT')
def configLink(self, A, J, helicity, rig, move, part):
bpy.ops.object.mode_set(mode='OBJECT')
Q = (0.18648+0.146446)*A
# Z = -Q*2
Z = 0.0
obj_joint = bpy.data.objects["joint.gold.arm-right.006"].copy()
obj_joint.location = (0.0, 0.0, -Q*3+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "a2a1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.silver.002"].copy()
obj_joint.location = (0.0, 0.0, +Q*(3+4)+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y1a2.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.copper.001"].copy()
obj_joint.location = (0.0, 0.0, +Q*(1+4)+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "a2o1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.001"].copy()
obj_joint.location = (0.0, 0.0, -Q*2+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "a1b1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
for n in range(1, J - 1):
if n <= (J-2):
# Pattern 2 of by
obj_joint = bpy.data.objects["joint.green.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*((n+1) % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "b"+str(n)+"y"+str(n)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
if n <= (J-3):
# Pattern 2 of yy
obj_joint = bpy.data.objects["joint.gold.00"+str(1 + (n+1) % 2)].copy()
obj_joint.location = (0.0, 0.0, +Q*(1 - (n % 2))*2+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"y"+str(n+1)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
if n <= (J-3):
# Pattern 1 of ob
obj_joint = bpy.data.objects["joint.blue.00"+str(1 + (n+1) % 2)].copy()
obj_joint.location = (0.0, 0.0, -Q*4 + Q*(n % 2)*8 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "o"+str(n)+"b"+str(n+1)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
# Pattern 2 of yo
obj_joint = bpy.data.objects["joint.copper.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*((n+1) % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n+1)+"o"+str(n+1)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.gold.y3y4.001"].copy()
obj_joint.location = (0.0, 0.0, +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y3y4.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["cursor.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "y4w1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["cursor.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "w1w2.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.gold.y3y5.gimbal.000"].copy()
obj_joint.location = (0.0, 0.0, +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y3y5.gimbal.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.gold.y5w3.gimbal.001"].copy()
obj_joint.location = (0.0, 0.0, +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y5w3.gimbal.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
for ob in bpy.context.scene.objects:
if "mesh" in ob.name:
ob.select = True
bpy.ops.object.make_single_user(type='SELECTED_OBJECTS', object=True, obdata=True, material=True, texture=True, animation=True)
bpy.context.scene.cursor_location = (0.0, 0.0, 0.0)
bpy.ops.object.origin_set(type='ORIGIN_CURSOR')
class LeftLeg(Formula):
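# Left leg: a 7-joint chain that also owns the hip attachments; its
# setParent hangs the pitch off y3o4 and the right leg off y3o3.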
J = 7 # number of joints
# Overriding
def __init__(self, P, A, move, part, helicity, start, end,
leg_left_loc, leg_left_rot,
leg_right_loc, leg_right_rot, leg_right,
pitch_loc, pitch_rot, pitch):
global interval
global frame_start
global frame_end
self.interval = interval
self.frame_start = frame_start
self.frame_end = frame_end
# pivot factor
self.P = P
# scale factor
self.A = A
# name
self.move = move
# element
self.part = part
# element helicity
self.helicity = helicity
self.start = start
self.end = end
# leg_left
self.leg_left_loc = leg_left_loc
self.leg_left_rot = leg_left_rot
# leg_right
self.leg_right_loc = leg_right_loc
self.leg_right_rot = leg_right_rot
self.leg_right = leg_right
# pitch
self.pitch_loc = pitch_loc
self.pitch_rot = pitch_rot
self.pitch = pitch
# body (note: `body` is not an __init__ parameter; it resolves from the enclosing module scope)
self.body = body
bpy.ops.object.mode_set(mode='OBJECT')
# Create armature and object
self.amt = bpy.data.armatures.new(move + '.' + part + '.' + helicity + '.data')
self.rig = bpy.data.objects.new(move + '.' + part + '.' + helicity, self.amt)
# Joints
self.a = [0 for i in range(4)] # Joint α
self.b = [0 for i in range(self.J)] # Joint β
self.y = [0 for i in range(self.J)] # Joint γ
self.o = [0 for i in range(self.J)] # Joint δ
self.w = [0 for i in range(self.J)] # Joint ω
# Configuration Movement
self.configMovement(self.P, self.A, self.J, self.a, self.b, self.y, self.o, self.w)
# Construction Movement
self.constructMovement(self.J, self.helicity, self.amt, self.rig, self.a, self.b, self.y, self.o, self.w)
# Parent the pitch and the right leg to the left leg
self.setParent(self.helicity, self.move, self.rig,
self.leg_left_loc, self.leg_left_rot,
self.leg_right_loc, self.leg_right_rot, self.leg_right,
self.pitch_loc, self.pitch_rot, self.pitch)
# Construction Rotation
self.configRotation(self.rig, self.interval, self.frame_start, self.frame_end, self.start, self.end)
# Configuration Linkage
self.configLink(self.A, self.J, self.helicity, self.rig, self.move, self.part)
# Construction Linkage
self.constructLink(self.A, self.J, self.helicity, self.rig, self.move, self.part)
# Overriding Configuration Movement
def configMovement(self, P, A, J, a, b, y, o, w):
a[1] = mathutils.Euler((P, A, 0), 'XYZ')
print ("a1 =", a[1])
a[2] = mathutils.Euler((A, -A, 0), 'XYZ')
print ("a2 =", a[2])
b[1] = mathutils.Euler((-A, A, 0), 'XYZ')
print ("b1 =", b[1])
o[1] = mathutils.Euler((A, A, 0), 'XYZ')
print ("o1 =", o[1])
B = A * 2 * sqrt (2)
C = B + (B * sqrt (2))
D = C * sqrt (2)
E = C + D
y[1] = mathutils.Euler((-A, -A, 0), 'XYZ')
print ("y1 =", y[1])
y[2] = mathutils.Euler((5.64394, -7.71944, 0), 'XYZ')
print ("y2 =", y[2])
b[2] = mathutils.Euler((6.96819, -6.39353, 0), 'XYZ')
print ("b2 =", b[2])
o[2] = mathutils.Euler((7.57679, -7.71912, 0), 'XYZ')
print ("o2 =", o[2])
y[3] = mathutils.Euler((5.64394, -17.9947, 0), 'XYZ')
print ("y3 =", y[3])
o[4] = mathutils.Euler((3.89305, -26.8512, 0), 'XYZ')
print ("o4 =", o[4])
o[3] = mathutils.Euler((-1.02857, -20.6204, 0), 'XYZ')
print ("o3 =", o[3])
b[4] = mathutils.Euler((12.94517, -7.71944, 0), 'XYZ')
print ("b4 =", b[4])
b[3] = mathutils.Euler((5.64394, -7.71944, -7.30119), 'XYZ')
print ("b3 =", b[3])
w[1] = mathutils.Euler((5.64394, -5.67138, -7.30119), 'XYZ')
print ("w1 =", w[1])
w[2] = mathutils.Euler((5.64394, -5.47138, -7.30119), 'XYZ')
print ("w2 =", w[2])
w[3] = mathutils.Euler((12.94517, -5.67138, 0), 'XYZ')
print ("w3 =", w[3])
b[6] = mathutils.Euler((1.7675, -A, 0), 'XYZ')
print ("b6 =", b[6])
b[5] = mathutils.Euler((-A, -A, -2.43247), 'XYZ')
print ("b5 =", b[5])
w[4] = mathutils.Euler((-A, A, -2.43247), 'XYZ')
print ("w4 =", w[4])
w[5] = mathutils.Euler((-A, 0.835105, -2.43247), 'XYZ')
print ("w5 =", w[5])
w[6] = mathutils.Euler((1.7675, A, 0), 'XYZ')
print ("w6 =", w[6])
def constructMovement(self, J, helicity, amt, rig, a, b, y, o, w):
# Linkages
aa = [[0 for i in range(4)] for j in range(4)] # Link α(i) - α(j)
ab = [[0 for i in range(4)] for j in range(4)] # Link α(i) - β(j)
ya = [[0 for i in range(4)] for j in range(4)] # Link γ(i) - α(j)
ao = [[0 for i in range(4)] for j in range(4)] # Link α(i) - δ(j)
ob = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - β(j)
yy = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - γ(j)
by = [[0 for i in range(self.J)] for j in range(self.J)] # Link β(i) - γ(j)
yo = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - δ(j)
yb = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - β(j)
bw = [[0 for i in range(self.J)] for j in range(self.J)] # Link β(i) - ω(j)
ww = [[0 for i in range(self.J)] for j in range(self.J)] # Link ω(i) - ω(j)
rig.location = mathutils.Euler((0.0, 0.0, 0.0), 'XYZ')
rig.show_x_ray = True
amt.show_names = True
amt.draw_type = 'STICK'
# amt.draw_type = 'BBONE'
# Link object to scene
scn = bpy.context.scene
scn.objects.link(rig)
scn.objects.active = rig
scn.update()
# Edit
bpy.ops.object.editmode_toggle()
# Construction Linkage
aa[2][1] = amt.edit_bones.new('a2a1')
aa[2][1].head = a[2]
aa[2][1].tail = a[1]
ab[1][1] = amt.edit_bones.new('a1b1')
ab[1][1].head = a[1]
ab[1][1].tail = b[1]
ab[1][1].parent = aa[2][1]
by[1][1] = amt.edit_bones.new('b1y1')
by[1][1].head = b[1]
by[1][1].tail = y[1]
by[1][1].parent = ab[1][1]
by[1][1].use_inherit_rotation = False
ya[1][2] = amt.edit_bones.new('y1a2')
ya[1][2].head = y[1]
ya[1][2].tail = a[2]
ya[1][2].parent = by[1][1]
ao[2][1] = amt.edit_bones.new('a2o1')
ao[2][1].head = a[2]
ao[2][1].tail = o[1]
ao[2][1].parent = ya[1][2]
ob[1][2] = amt.edit_bones.new('o1b2')
ob[1][2].head = o[1]
ob[1][2].tail = b[2]
ob[1][2].parent = ao[2][1]
yy[1][2] = amt.edit_bones.new('y1y2')
yy[1][2].head = y[1]
yy[1][2].tail = y[2]
yy[1][2].parent = by[1][1]
for j in range(2, J - 4):
by[j][j] = amt.edit_bones.new('b'+ str(j) + 'y'+ str(j))
by[j][j].head = b[j]
by[j][j].tail = y[j]
by[j][j].parent = ob[j-1][j]
yo[j][j] = amt.edit_bones.new('y'+ str(j) + 'o'+ str(j))
yo[j][j].head = y[j]
yo[j][j].tail = o[j]
yo[j][j].parent = yy[j-1][j]
yy[j][j+1] = amt.edit_bones.new('y'+ str(j) + 'y'+ str(j+1))
yy[j][j+1].head = y[j]
yy[j][j+1].tail = y[j+1]
yy[j][j+1].parent = by[j][j]
if j < (J-5):
ob[j][j+1] = amt.edit_bones.new('o'+ str(j) + 'b'+ str(j+1))
ob[j][j+1].head = o[j]
ob[j][j+1].tail = b[j+1]
ob[j][j+1].parent = yo[j][j]
yo[3][3] = amt.edit_bones.new('y3o3')
yo[3][3].head = y[3]
yo[3][3].tail = o[3]
yo[3][3].parent = yy[2][3]
yo[3][3].use_inherit_rotation = False
yo[3][4] = amt.edit_bones.new('y3o4')
yo[3][4].head = y[3]
yo[3][4].tail = o[4]
yo[3][4].parent = yy[2][3]
yo[3][4].use_inherit_rotation = False
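# Hip fan: y3o3 and y3o4 radiate from y3 with rotation inheritance disabled;
# they act purely as attachment bones for the right leg and the pitch (see
# setParent below), so linkage motion does not twist what hangs from them.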
yb[2][3] = amt.edit_bones.new('y2b3')
yb[2][3].head = y[2]
yb[2][3].tail = b[3]
yb[2][3].parent = yy[1][2]
bw[3][1] = amt.edit_bones.new('b3w1')
bw[3][1].head = b[3]
bw[3][1].tail = w[1]
bw[3][1].parent = yb[2][3]
ww[1][2] = amt.edit_bones.new('w1w2')
ww[1][2].head = w[1]
ww[1][2].tail = w[2]
ww[1][2].parent = bw[3][1]
yb[2][4] = amt.edit_bones.new('y2b4.gimbal')
yb[2][4].head = y[2]
yb[2][4].tail = b[4]
yb[2][4].parent = yy[1][2]
bw[4][3] = amt.edit_bones.new('b4w3.gimbal')
bw[4][3].head = b[4]
bw[4][3].tail = w[3]
bw[4][3].parent = yb[2][4]
yb[1][5] = amt.edit_bones.new('y1b5')
yb[1][5].head = y[1]
yb[1][5].tail = b[5]
yb[1][5].parent = by[1][1]
bw[5][4] = amt.edit_bones.new('b5w4')
bw[5][4].head = b[5]
bw[5][4].tail = w[4]
bw[5][4].parent = yb[1][5]
ww[4][5] = amt.edit_bones.new('w4w5')
ww[4][5].head = w[4]
ww[4][5].tail = w[5]
ww[4][5].parent = bw[5][4]
yb[1][6] = amt.edit_bones.new('y1b6.gimbal')
yb[1][6].head = y[1]
yb[1][6].tail = b[6]
yb[1][6].parent = by[1][1]
bw[6][6] = amt.edit_bones.new('b6w6.gimbal')
bw[6][6].head = b[6]
bw[6][6].tail = w[6]
bw[6][6].parent = yb[1][6]
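# Foot helpers: two parallel pairs - the y2b3/b3w1/w1w2 chain with its
# y2b4/b4w3 gimbal, and the y1b5/b5w4/w4w5 chain with its y1b6/b6w6 gimbal.
# Each gimbal bone is IK-aimed at its w-w bone with the b-w bone as pole
# (constraints below), holding the foot orientation (assumed intent).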
# select all edit bones
#bpy.ops.pose.select_all(action="SELECT")
for b in amt.edit_bones:
b.select = True
if helicity == 'right':
bpy.ops.armature.calculate_roll(type='GLOBAL_POS_Z')
else:
bpy.ops.armature.calculate_roll(type='GLOBAL_NEG_Z')
# Bone constraints. Armature must be in pose mode.
bpy.ops.object.mode_set(mode='POSE')
# IK constraint
cns = rig.pose.bones['y1a2'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'a2a1'
cns.chain_count = 2
cns.use_stretch = False
for j in range(2, J - 4):
cns = rig.pose.bones['b'+str(j) +'y'+str(j)].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'y'+str(j)+'o'+str(j)
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
cns = rig.pose.bones['b4w3.gimbal'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'w1w2'
cns.pole_target = rig
cns.pole_subtarget = 'b3w1'
cns.pole_angle = math.radians(0)
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
cns = rig.pose.bones['b6w6.gimbal'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'w4w5'
cns.pole_target = rig
cns.pole_subtarget = 'b5w4'
cns.pole_angle = math.radians(0)
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
bpy.ops.object.mode_set(mode='OBJECT')
# Parent the pitch and the right leg to the left leg
def setParent(self, helicity, move, rig,
leg_left_loc, leg_left_rot,
leg_right_loc, leg_right_rot, leg_right,
pitch_loc, pitch_rot, pitch):
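# The measured locations/rotations are applied first; then, at frame 0, each
# child rig is bone-parented with keep_transform=True so its world transform
# is preserved by the new parent relationship.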
# leg left position
rig.location = leg_left_loc
rig.rotation_euler = leg_left_rot
# leg right position
leg_right.rig.location = leg_right_loc
leg_right.rig.rotation_euler = leg_right_rot
# pitch position
pitch.rig.location = pitch_loc
pitch.rig.rotation_euler = pitch_rot
# pitch to left leg
bpy.ops.object.mode_set(mode='OBJECT')
bpy.context.scene.frame_current = 0
bpy.ops.object.select_all(action='DESELECT')
rig.select = True
bpy.context.scene.objects.active = rig
bpy.ops.object.editmode_toggle()
parent_bone = 'y3o4' # the bone that will act as the parent of the pitch
rig.data.edit_bones.active = rig.data.edit_bones[parent_bone]
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='DESELECT') #deselect all objects
pitch.rig.select = True
rig.select = True
bpy.context.scene.objects.active = rig # the active object becomes the parent of all selected objects
bpy.ops.object.parent_set(type='BONE', keep_transform=True)
bpy.ops.object.select_all(action='DESELECT') #deselect all objects
# end
rig.select = True
bpy.context.scene.objects.active = rig
bpy.ops.object.editmode_toggle()
parent_bone = 'y3o3' # the bone that will act as the parent of the right leg
rig.data.edit_bones.active = rig.data.edit_bones[parent_bone]
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='DESELECT') #deselect all objects
leg_right.rig.select = True
rig.select = True
bpy.context.scene.objects.active = rig # the active object becomes the parent of all selected objects
bpy.ops.object.parent_set(type='BONE', keep_transform=True)
bpy.ops.object.select_all(action='DESELECT') #deselect all objects
def configLink(self, A, J, helicity, rig, move, part):
bpy.ops.object.mode_set(mode='OBJECT')
Q = (0.18648+0.146446)*A
# Z = -Q*2
Z = 0.0
obj_joint = bpy.data.objects["joint.gold.000"].copy()
obj_joint.location = (0.0, 0.0, -Q*3+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "a2a1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.silver.001"].copy()
obj_joint.location = (0.0, 0.0, +Q+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y1a2.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.copper.001"].copy()
obj_joint.location = (0.0, 0.0, +Q*3+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "a2o1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.001"].copy()
obj_joint.location = (0.0, 0.0, -Q*2+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "a1b1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
for n in range(1, J - 3):
if n <= (J-5):
# Pattern 2 of by
obj_joint = bpy.data.objects["joint.green.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*((n+1) % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "b"+str(n)+"y"+str(n)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
# Pattern 2 of yy
obj_joint = bpy.data.objects["joint.gold.00"+str(1 + (n+1) % 2)].copy()
obj_joint.location = (0.0, 0.0, +Q*(1 - (n % 2))*2+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"y"+str(n+1)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
if n <= (J-6):
# Pattern 1 of ob
obj_joint = bpy.data.objects["joint.blue.00"+str(1 + (n+1) % 2)].copy()
obj_joint.location = (0.0, 0.0, -Q*4 + Q*(n % 2)*8 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "o"+str(n)+"b"+str(n+1)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
if n <= (J-6):
# Pattern 2 of yo
obj_joint = bpy.data.objects["joint.copper.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*((n+1) % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n+1)+"o"+str(n+1)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.copper.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "y3o3.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.copper.y3o4.leg-left.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "y3o4.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.copper.y2b3.leg-left.001"].copy()
obj_joint.location = (0.0, 0.0, +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y2b3.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["cursor.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "b3w1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["cursor.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "w1w2.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.gold.y3y5.gimbal.001"].copy()
obj_joint.location = (0.0, 0.0, +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y2b4.gimbal.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.b4w3.gimbal.leg-left.001"].copy()
obj_joint.location = (0.0, 0.0, +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "b4w3.gimbal.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.gold.y1b5.leg-left.001"].copy()
obj_joint.location = (0.0, 0.0, +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y1b5.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["cursor.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "b5w4.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["cursor.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "w4w5.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.gold.y3y5.gimbal.001"].copy()
obj_joint.location = (0.0, 0.0, +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y1b6.gimbal.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.b6w6.gimbal.001"].copy()
obj_joint.location = (0.0, 0.0, +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "b6w6.gimbal.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
for ob in context.scene.objects:
if "mesh" in ob.name:
ob.select = True
bpy.ops.object.make_single_user(type='SELECTED_OBJECTS', object=True, obdata=True, material=True, texture=True, animation=True)
bpy.context.scene.cursor_location = (0.0, 0.0, 0.0)
bpy.ops.object.origin_set(type='ORIGIN_CURSOR')
class RightLeg(Formula):
J = 7  # joint number
# Overriding
def __init__(self, P, A, move, part, helicity, start, end):
global interval
global frame_start
global frame_end
self.interval = interval
self.frame_start = frame_start
self.frame_end = frame_end
# pivot factor
self.P = P
# scale factor
self.A = A
# name
self.move = move
# element
self.part = part
# element helicity
self.helicity = helicity
self.start = start
self.end = end
bpy.ops.object.mode_set(mode='OBJECT')
# Create armature and object
self.amt = bpy.data.armatures.new(move + '.' + part + '.' + helicity + '.data')
self.rig = bpy.data.objects.new(move + '.' + part + '.' + helicity, self.amt)
# Joints
self.a = [0 for i in range(4)] # Joint α
self.b = [0 for i in range(self.J)] # Joint β
self.y = [0 for i in range(self.J)] # Joint γ
self.o = [0 for i in range(self.J)] # Joint δ
self.w = [0 for i in range(self.J)] # Joint ω
# Configuration Movement
self.configMovement(self.P, self.A, self.J, self.a, self.b, self.y, self.o, self.w)
# Construction Movement
self.constructMovement(self.J, self.helicity, self.amt, self.rig, self.a, self.b, self.y, self.o, self.w)
# Construction Rotation
self.configRotation(self.rig, self.interval, self.frame_start, self.frame_end, self.start, self.end)
# Configuration Linkage
self.configLink(self.A, self.J, self.helicity, self.rig, self.move, self.part)
# Construction Linkage
self.constructLink(self.A, self.J, self.helicity, self.rig, self.move, self.part)
# Overriding Configuration Movement
def configMovement(self, P, A, J, a, b, y, o, w):
a[1] = mathutils.Euler((P, A, 0), 'XYZ')
print ("a1 =", a[1])
a[2] = mathutils.Euler((A, -A, 0), 'XYZ')
print ("a2 =", a[2])
b[1] = mathutils.Euler((-A, A, 0), 'XYZ')
print ("b1 =", b[1])
o[1] = mathutils.Euler((A, A, 0), 'XYZ')
print ("o1 =", o[1])
B = A * 2 * sqrt (2)
C = B + (B * sqrt (2))
D = C * sqrt (2)
E = C + D
y[1] = mathutils.Euler((-A, -A, 0), 'XYZ')
print ("y1 =", y[1])
y[2] = mathutils.Euler((5.68545, -7.44271, 0), 'XYZ')
print ("y2 =", y[2])
b[2] = mathutils.Euler((6.95751, -6.17062, 0), 'XYZ')
print ("b2 =", b[2])
o[2] = mathutils.Euler((7.53988, -7.37879, 0), 'XYZ')
print ("o2 =", o[2])
y[3] = mathutils.Euler((7.73628, -17.1943, 0), 'XYZ')
print ("y3 =", y[3])
b[4] = mathutils.Euler((11.3434, -7.44271, 0), 'XYZ')
print ("b4 =", b[4])
b[3] = mathutils.Euler((5.68546, -7.44271, -5.65991), 'XYZ')
print ("b3 =", b[3])
w[1] = mathutils.Euler((5.68546, -5.67138, -8.65991), 'XYZ')
print ("w1 =", w[1])
w[2] = mathutils.Euler((5.68546, -5.47138, -8.65991), 'XYZ')
print ("w2 =", w[2])
w[3] = mathutils.Euler((14.3434, -5.67138, 0), 'XYZ')
print ("w3 =", w[3])
b[6] = mathutils.Euler((10.1687, -17.1943, 0), 'XYZ')
print ("b6 =", b[6])
b[5] = mathutils.Euler((7.73628, -17.1943, -2.43205), 'XYZ')
print ("b5 =", b[5])
w[4] = mathutils.Euler((7.73628, -15.42297, -5.43205), 'XYZ')
print ("w4 =", w[4])
w[5] = mathutils.Euler((7.73628, -15.22297, -5.43205), 'XYZ')
print ("w5 =", w[5])
w[6] = mathutils.Euler((13.1687, -15.42297, 0), 'XYZ')
print ("w6 =", w[6])
def constructMovement(self, J, helicity, amt, rig, a, b, y, o, w):
# Linkages
aa = [[0 for i in range(4)] for j in range(4)] # Link α(i) - α(j)
ab = [[0 for i in range(4)] for j in range(4)] # Link α(i) - β(j)
ya = [[0 for i in range(4)] for j in range(4)] # Link γ(i) - α(j)
ao = [[0 for i in range(4)] for j in range(4)] # Link α(i) - δ(j)
ob = [[0 for i in range(self.J)] for j in range(self.J)] # Link δ(i) - β(j)
yy = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - γ(j)
by = [[0 for i in range(self.J)] for j in range(self.J)] # Link β(i) - γ(j)
yo = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - δ(j)
yb = [[0 for i in range(self.J)] for j in range(self.J)] # Link γ(i) - β(j)
bw = [[0 for i in range(self.J)] for j in range(self.J)] # Link β(i) - ω(j)
ww = [[0 for i in range(self.J)] for j in range(self.J)] # Link ω(i) - ω(j)
rig.location = mathutils.Euler((0.0, 0.0, 0.0), 'XYZ')
rig.show_x_ray = True
amt.show_names = True
amt.draw_type = 'STICK'
# amt.draw_type = 'BBONE'
# Link object to scene
scn = bpy.context.scene
scn.objects.link(rig)
scn.objects.active = rig
scn.update()
# Edit
bpy.ops.object.editmode_toggle()
# Construction Linkage
aa[2][1] = amt.edit_bones.new('a2a1')
aa[2][1].head = a[2]
aa[2][1].tail = a[1]
ab[1][1] = amt.edit_bones.new('a1b1')
ab[1][1].head = a[1]
ab[1][1].tail = b[1]
ab[1][1].parent = aa[2][1]
by[1][1] = amt.edit_bones.new('b1y1')
by[1][1].head = b[1]
by[1][1].tail = y[1]
by[1][1].parent = ab[1][1]
by[1][1].use_inherit_rotation = False
ya[1][2] = amt.edit_bones.new('y1a2')
ya[1][2].head = y[1]
ya[1][2].tail = a[2]
ya[1][2].parent = by[1][1]
ao[2][1] = amt.edit_bones.new('a2o1')
ao[2][1].head = a[2]
ao[2][1].tail = o[1]
ao[2][1].parent = ya[1][2]
ob[1][2] = amt.edit_bones.new('o1b2')
ob[1][2].head = o[1]
ob[1][2].tail = b[2]
ob[1][2].parent = ao[2][1]
yy[1][2] = amt.edit_bones.new('y1y2')
yy[1][2].head = y[1]
yy[1][2].tail = y[2]
yy[1][2].parent = by[1][1]
for j in range(2, J - 4):
by[j][j] = amt.edit_bones.new('b'+ str(j) + 'y'+ str(j))
by[j][j].head = b[j]
by[j][j].tail = y[j]
by[j][j].parent = ob[j-1][j]
yo[j][j] = amt.edit_bones.new('y'+ str(j) + 'o'+ str(j))
yo[j][j].head = y[j]
yo[j][j].tail = o[j]
yo[j][j].parent = yy[j-1][j]
yy[j][j+1] = amt.edit_bones.new('y'+ str(j) + 'y'+ str(j+1))
yy[j][j+1].head = y[j]
yy[j][j+1].tail = y[j+1]
yy[j][j+1].parent = by[j][j]
if j < (J - 5):
ob[j][j+1] = amt.edit_bones.new('o'+ str(j) + 'b'+ str(j+1))
ob[j][j+1].head = o[j]
ob[j][j+1].tail = b[j+1]
ob[j][j+1].parent = yo[j][j]
yb[2][3] = amt.edit_bones.new('y2b3')
yb[2][3].head = y[2]
yb[2][3].tail = b[3]
yb[2][3].parent = yy[1][2]
bw[3][1] = amt.edit_bones.new('b3w1')
bw[3][1].head = b[3]
bw[3][1].tail = w[1]
bw[3][1].parent = yb[2][3]
ww[1][2] = amt.edit_bones.new('w1w2')
ww[1][2].head = w[1]
ww[1][2].tail = w[2]
ww[1][2].parent = bw[3][1]
yb[2][4] = amt.edit_bones.new('y2b4.gimbal')
yb[2][4].head = y[2]
yb[2][4].tail = b[4]
yb[2][4].parent = yy[1][2]
bw[4][3] = amt.edit_bones.new('b4w3.gimbal')
bw[4][3].head = b[4]
bw[4][3].tail = w[3]
bw[4][3].parent = yb[2][4]
yb[3][5] = amt.edit_bones.new('y3b5')
yb[3][5].head = y[3]
yb[3][5].tail = b[5]
yb[3][5].parent = yy[2][3]
bw[5][4] = amt.edit_bones.new('b5w4')
bw[5][4].head = b[5]
bw[5][4].tail = w[4]
bw[5][4].parent = yb[3][5]
ww[4][5] = amt.edit_bones.new('w4w5')
ww[4][5].head = w[4]
ww[4][5].tail = w[5]
ww[4][5].parent = bw[5][4]
yb[3][6] = amt.edit_bones.new('y3b6.gimbal')
yb[3][6].head = y[3]
yb[3][6].tail = b[6]
yb[3][6].parent = yy[2][3]
bw[6][6] = amt.edit_bones.new('b6w6.gimbal')
bw[6][6].head = b[6]
bw[6][6].tail = w[6]
bw[6][6].parent = yb[3][6]
# select all bones
# bpy.ops.pose.select_all(action="SELECT")
for b in amt.edit_bones:
b.select = True
if helicity == 'right':
bpy.ops.armature.calculate_roll(type='GLOBAL_POS_Z')
else:
bpy.ops.armature.calculate_roll(type='GLOBAL_NEG_Z')
# Bone constraints. Armature must be in pose mode.
bpy.ops.object.mode_set(mode='POSE')
# IK constraint
cns = rig.pose.bones['y1a2'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'a2a1'
cns.chain_count = 2
cns.use_stretch = False
for j in range(2, J - 4):
cns = rig.pose.bones['b'+str(j) +'y'+str(j)].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'y'+str(j)+'o'+str(j)
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
cns = rig.pose.bones['b4w3.gimbal'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'w1w2'
cns.pole_target = rig
cns.pole_subtarget = 'b3w1'
cns.pole_angle = math.radians(0)
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
cns = rig.pose.bones['b6w6.gimbal'].constraints.new('IK')
cns.name = 'Ik'
cns.target = rig
cns.subtarget = 'w4w5'
cns.pole_target = rig
cns.pole_subtarget = 'b5w4'
cns.pole_angle = math.radians(0)
cns.iterations = 500
cns.chain_count = 2
cns.use_stretch = False
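# Both *.gimbal constraints above follow the same pattern: a two-bone IK
# solve towards a w* end bone, with the matching b*w* bone as pole target
# (pole_angle 0) to pin the bend plane of the chain.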
bpy.ops.object.mode_set(mode='OBJECT')
def configLink(self, A, J, helicity, rig, move, part):
bpy.ops.object.mode_set(mode='OBJECT')
Q = (0.18648+0.146446)*A
# Z = -Q*2
Z = 0.0
obj_joint = bpy.data.objects["joint.gold.leg-right.000"].copy()
obj_joint.location = (0.0, 0.0, -Q*3+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "a2a1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.silver.001"].copy()
obj_joint.location = (0.0, 0.0, +Q+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y1a2.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.copper.001"].copy()
obj_joint.location = (0.0, 0.0, +Q*3+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "a2o1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.001"].copy()
obj_joint.location = (0.0, 0.0, -Q*2+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "a1b1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
for n in range(1, J - 3):
if n <= (J-5):
# Pattern 2 of by
obj_joint = bpy.data.objects["joint.green.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*((n+1) % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "b"+str(n)+"y"+str(n)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
if n <= (J-6):
# Pattern 2 of yy
obj_joint = bpy.data.objects["joint.gold.00"+str(1 + (n+1) % 2)].copy()
obj_joint.location = (0.0, 0.0, +Q*(1 - (n % 2))*2+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n)+"y"+str(n+1)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
if n <= (J-6):
# Pattern 1 of ob
obj_joint = bpy.data.objects["joint.blue.00"+str(1 + (n+1) % 2)].copy()
obj_joint.location = (0.0, 0.0, -Q*4 + Q*(n % 2)*8 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "o"+str(n)+"b"+str(n+1)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
if n <= (J-6):
# Pattern 2 of yo
obj_joint = bpy.data.objects["joint.copper.001"].copy()
obj_joint.location = (0.0, 0.0, -Q + Q*((n+1) % 2)*4 +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y"+str(n+1)+"o"+str(n+1)+".mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.gold.y2y3.leg-right.001"].copy()
obj_joint.location = (0.0, 0.0, +Q*2+Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y2y3.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.copper.y2b3.leg-right.001"].copy()
obj_joint.location = (0.0, 0.0, +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y2b3.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["cursor.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "b3w1.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["cursor.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "w1w2.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.gold.y3y5.gimbal.001"].copy()
obj_joint.location = (0.0, 0.0, +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y2b4.gimbal.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.gold.b4w3.gimbal.gimbal.leg-right.001"].copy()
obj_joint.location = (0.0, 0.0, +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "b4w3.gimbal.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.gold.y3b5.leg-right.001"].copy()
obj_joint.location = (0.0, 0.0, +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y3b5.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["cursor.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "b5w4.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["cursor.001"].copy()
obj_joint.location = (0.0, 0.0, 0.0)
obj_joint.scale = (A, A, A)
obj_joint.name = "w4w5.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.gold.y3y5.gimbal.001"].copy()
obj_joint.location = (0.0, 0.0, +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "y3b6.gimbal.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
obj_joint = bpy.data.objects["joint.blue.b6w6.gimbal.001"].copy()
obj_joint.location = (0.0, 0.0, +Z)
obj_joint.scale = (A, A, A)
obj_joint.name = "b6w6.gimbal.mesh." + move + '.' + part +'.' + helicity
bpy.context.scene.objects.link(obj_joint)
for ob in context.scene.objects:
if "mesh" in ob.name:
ob.select = True
bpy.ops.object.make_single_user(type='SELECTED_OBJECTS', object=True, obdata=True, material=True, texture=True, animation=True)
bpy.context.scene.cursor_location = (0.0, 0.0, 0.0)
bpy.ops.object.origin_set(type='ORIGIN_CURSOR')
def formula():
# pivot factor
P = 0
# scale factor
A = 1
# name
move = 'formula'
# element
part = 'universe'
# left or right
helicity = 'left'
start = 0
end = start+360
formula = Formula(P, A, move, part, helicity, start, end)
def arms():
# scale factor
A = 0.380
# pivot factor
# P = 0.010708
P = 0
# name
move = 'kungfu'
# arm element
part = 'arm-left'
# left arm element
helicity = 'left'
start = -116.496
end = start+360
global arm_left
arm_left = LeftArm(P, A, move, part, helicity, start, end)
# arm element
part = 'arm-right'
# right arm element
helicity = 'right'
start = -252.189
end = start-360
global arm_right
arm_right = RightArm(P, A, move, part, helicity, start, end)
def legs():
# name
move = 'kungfu'
# leg element
part = 'leg-right'
# scale factor
A = 0.638694
# pivot factor
P = -0.02853
# P = 0
# right leg element
helicity = 'right'
start = -180.491
end = start-360
global leg_right
leg_right = RightLeg(P, A, move, part, helicity, start, end)
leg_right_loc = (8.88082, 1.22518, 22.398)
leg_right_rot = mathutils.Euler((math.radians(-90.0), math.radians(173.618), math.radians(0.0)), 'XYZ')
# leg element
part = 'leg-left'
# scale factor
A = 0.664895
# pivot factor
P = -0.030131
# P = 0
# left leg element
helicity = 'right'
start = -0.491119
end = start+360
global pitch
pitch_loc = (11.7981, 1.49764, 26.9466)
pitch_rot = mathutils.Euler((math.radians(90.0), math.radians(29.4707), math.radians(-90)), 'XYZ')
global leg_left
leg_left_loc = (14.053, 5.91232, 1.8578)
leg_left_rot = mathutils.Euler((math.radians(-90.0), math.radians(-13.2686), math.radians(0.0)), 'XYZ')
leg_left = LeftLeg(P, A, move, part, helicity, start, end,
leg_left_loc, leg_left_rot,
leg_right_loc, leg_right_rot, leg_right,
pitch_loc, pitch_rot, pitch)
global body
bpy.ops.object.mode_set(mode='OBJECT')
body.rig.select = True
# Bone constraints. Armature must be in pose mode.
bpy.ops.object.mode_set(mode='POSE')
# Damped Track constraints
cns = body.rig.pose.bones['o4b5.gimbal.lower.right'].constraints.new('DAMPED_TRACK')
cns.name = 'Damped Track'
cns.target = leg_right.rig
cns.subtarget = 'y2b3'
cns.head_tail = 1
cns = body.rig.pose.bones['b5y5.gimbal.lower.right'].constraints.new('DAMPED_TRACK')
cns.name = 'Damped Track'
cns.target = leg_right.rig
cns.subtarget = 'y3b5'
cns.head_tail = 1
cns = body.rig.pose.bones['o4b5.gimbal.lower.left'].constraints.new('DAMPED_TRACK')
cns.name = 'Damped Track'
cns.target = leg_left.rig
cns.subtarget = 'y2b3'
cns.head_tail = 1
cns = body.rig.pose.bones['b5y5.gimbal.lower.left'].constraints.new('DAMPED_TRACK')
cns.name = 'Damped Track'
cns.target = leg_left.rig
cns.subtarget = 'y1b5'
cns.head_tail = 1
bpy.ops.object.mode_set(mode='OBJECT')
leg_right.rig.select = True
# Bone constraints. Armature must be in pose mode.
bpy.ops.object.mode_set(mode='POSE')
# Damped Track constraints
cns = leg_right.rig.pose.bones['b3w1'].constraints.new('DAMPED_TRACK')
cns.name = 'Damped Track'
cns.target = body.rig
cns.subtarget = 'y3o3.lower.right'
cns.head_tail = 1
cns = leg_right.rig.pose.bones['b5w4'].constraints.new('DAMPED_TRACK')
cns.name = 'Damped Track'
cns.target = body.rig
cns.subtarget = 'b5y5.gimbal.lower.right'
cns.head_tail = 0
bpy.ops.object.mode_set(mode='OBJECT')
leg_left.rig.select = True
# Bone constraints. Armature must be in pose mode.
bpy.ops.object.mode_set(mode='POSE')
cns = leg_left.rig.pose.bones['b3w1'].constraints.new('DAMPED_TRACK')
cns.name = 'Damped Track'
cns.target = body.rig
cns.subtarget = 'y3o3.lower.left'
cns.head_tail = 1
cns = leg_left.rig.pose.bones['b5w4'].constraints.new('DAMPED_TRACK')
cns.name = 'Damped Track'
cns.target = body.rig
cns.subtarget = 'b5y5.gimbal.lower.left'
cns.head_tail = 0
bpy.ops.object.mode_set(mode='OBJECT')
def body():
# scale factor
A = 1
# pivot factor
P = -(A * 0.724843)
# name
move = 'kungfu'
# body element
part = 'body'
# helicity of element
helicity = 'right'
start = 132.259
end = start + 360
# start = 295
# end = 632.6244
global arm_left
global arm_right
arm_left_loc = (2.76687, 4.25045, -0.112974)
arm_left_rot = mathutils.Euler((math.radians(-89.8155), math.radians(-136.51), math.radians(96.5434)), 'XYZ')
arm_right_loc = (-6.09865, 2.74983, 0.039255)
arm_right_rot = mathutils.Euler((math.radians(-157.882), math.radians(-78.9597), math.radians(148.427)), 'XYZ')
global body
body = Body(P, A, move, part, helicity, start, end,
arm_left_loc, arm_left_rot, arm_left,
arm_right_loc, arm_right_rot, arm_right)
def pitch():
# scale factor
A = 2.0
# pivot factor
P = -1.37
# name
move = 'kungfu'
# pitch element
part = 'pitch'
# helicity of element
helicity = 'left'
start = 0.008209
end = start + 360
global body
body_loc = (-0.829043, 2.38647, -2.69095)
body_rot = mathutils.Euler((math.radians(-83.863), math.radians(87.2567), math.radians(-51.9298)), 'XYZ')
global pitch
pitch = Pitch(P, A, move, part, helicity, start, end,
body_loc, body_rot, body)
def main(origin):
global interval
global frame_start
global frame_end
frame_start = 0
frame_end = 48
interval = frame_end - frame_start
# formula()
arms()
body()  # roll
pitch()
legs()
if __name__ == "__main__":
# renaming of Collada-imported objects ("_" back to ".")
# for ob in context.scene.objects:
# if "joint_" in ob.name:
# ob.name = ob.name.replace("_", ".")
main((0,0,0))
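# Usage sketch (assumption: this script runs inside Blender, which provides
# the bpy module; the .blend file name below is hypothetical):
#   blender kungfu.blend --python this_script.py
# configLink() copies joint meshes by name ("joint.gold.000", "cursor.001",
# ...), so those template objects must already exist in the .blend file.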
077b6558613d88a8bbc11d9c217406d9a6622fa7 | 5,481 | py | Python | rest-api/tests/test_api_auth.py | Toxe/gps-tracks | 1118ff38995598920db3ea3b76be61dfc3f6a8e1 | ["MIT"]
from flask import url_for
from flask_jwt_extended import decode_token
from tests.example_data_fixtures import example_users
def test_login(client, example_users):
json = {"email": "user1@example.com", "password": "password1"}
r = client.post(url_for("auth.login"), json=json)
assert r.status_code == 200
assert r.is_json
access_token = r.get_json().get("access_token")
refresh_token = r.get_json().get("refresh_token")
assert access_token is not None
assert refresh_token is not None
token_data = decode_token(access_token)
refresh_token_data = decode_token(refresh_token)
assert token_data.get("identity") == 1
assert refresh_token_data.get("identity") == 1
def test_login_username_missing(client):
json = {"password": "password1"}
r = client.post(url_for("auth.login"), json=json)
assert r.status_code == 400
assert r.is_json
data = r.get_json()
assert data.get("message") == "Login email address or password missing."
assert "access_token" not in data
def test_login_password_missing(client):
json = {"email": "user1@example.com"}
r = client.post(url_for("auth.login"), json=json)
assert r.status_code == 400
assert r.is_json
data = r.get_json()
assert data.get("message") == "Login email address or password missing."
assert "access_token" not in data
def test_login_unknown_user(client, example_users):
r = client.post(
url_for("auth.login"),
json={"email": "unknown@example.com", "password": "password1"},
)
assert r.status_code == 401
assert r.is_json
data = r.get_json()
assert data.get("message") == "Login email address or password missing."
assert "access_token" not in data
def test_login_with_wrong_password(client, example_users):
json = {"email": "user1@example.com", "password": "wrong"}
r = client.post(url_for("auth.login"), json=json)
assert r.status_code == 401
assert r.is_json
data = r.get_json()
assert data.get("message") == "Login email address or password missing."
assert "access_token" not in data
def test_refresh_token(client, auth, example_users):
auth.login("user1@example.com", "password1")
old_access_token = auth.access_token
r = client.post(
url_for("auth.refresh"),
headers={"Authorization": f"Bearer {auth.refresh_token}"},
)
assert r.status_code == 200
assert r.is_json
access_token = r.get_json().get("access_token")
assert access_token is not None
assert access_token != old_access_token
token_data = decode_token(access_token)
assert token_data.get("identity") == 1
def test_cannot_call_refresh_for_unknown_user(client, auth, example_users):
auth.login("user1@example.com", "password1")
r = client.delete(
url_for("api.delete_user", user_id=auth.id),
headers={"Authorization": f"Bearer {auth.access_token}"},
)
assert r.status_code == 204
r = client.post(
url_for("auth.refresh"),
headers={"Authorization": f"Bearer {auth.refresh_token}"},
)
assert r.status_code == 401
def test_cannot_call_refresh_with_access_token(client, auth, example_users):
auth.login("user1@example.com", "password1")
r = client.post(
url_for("auth.refresh"),
headers={"Authorization": f"Bearer {auth.access_token}"},
)
assert r.status_code == 422
assert r.is_json
assert r.get_json().get("error") == "Only refresh tokens are allowed"
def test_cannot_call_protected_api_with_refresh_token(client, auth, example_users):
auth.login("user1@example.com", "password1")
r = client.delete(
url_for("api.delete_user", user_id=1),
headers={"Authorization": f"Bearer {auth.refresh_token}"},
)
assert r.status_code == 422
assert r.is_json
assert r.get_json().get("error") == "Only access tokens are allowed"
def test_logout_access_token(client, auth, example_users):
auth.login("user1@example.com", "password1")
# logout and blacklist access token
r = client.delete(
url_for("auth.logout_access_token"),
headers={"Authorization": f"Bearer {auth.access_token}"},
)
assert r.status_code == 200
assert r.is_json
assert r.get_json().get("message") == "Successfully logged out."
# no longer logged in
r = client.delete(
url_for("api.delete_user", user_id=1),
headers={"Authorization": f"Bearer {auth.access_token}"},
)
assert r.status_code == 401
assert r.is_json
assert r.get_json().get("error") == "Token has been revoked"
# request new access token
r = client.post(
url_for("auth.refresh"),
headers={"Authorization": f"Bearer {auth.refresh_token}"},
)
assert r.status_code == 200
def test_logout_refresh_token(client, auth, example_users):
auth.login("user1@example.com", "password1")
# logout refresh token
r = client.delete(
url_for("auth.logout_refresh_token"),
headers={"Authorization": f"Bearer {auth.refresh_token}"},
)
assert r.status_code == 200
assert r.is_json
assert r.get_json().get("message") == "Successfully logged out."
# cannot request new access token
r = client.post(
url_for("auth.refresh"),
headers={"Authorization": f"Bearer {auth.refresh_token}"},
)
assert r.status_code == 401
assert r.is_json
assert r.get_json().get("error") == "Token has been revoked"
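# The tests above rely on pytest fixtures `client`, `auth` and `example_users`
# defined outside this file (a conftest.py). A minimal sketch of what the
# `auth` helper could look like for this JSON login API -- a hedged guess, not
# the suite's actual fixture (the real one also exposes `auth.id`):
import pytest


class AuthActions:
    """Log in through the API and remember the returned JWT pair."""

    def __init__(self, client):
        self._client = client
        self.access_token = None
        self.refresh_token = None

    def login(self, email, password):
        r = self._client.post(url_for("auth.login"),
                              json={"email": email, "password": password})
        data = r.get_json()
        self.access_token = data.get("access_token")
        self.refresh_token = data.get("refresh_token")


@pytest.fixture
def auth(client):
    return AuthActions(client)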
07834049f50c9c1472710457c00fcca43f8e65e9 | 191 | py | Python | batch_process_script.py | Retr0Metal98/file_manipulators | fb9992c5e34910a3ddbb97ea88c3fe2e86477818 | ["MIT"] | 4 | 2020-08-06T11:57:59.000Z | 2020-08-14T15:45:33.000Z
from file_manipulators.batch_process_ops import batchOps_from_command_line
# Run command line functionality from batch_process_ops
if __name__ == '__main__':
batchOps_from_command_line()
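# Usage sketch (assumption: the package parses sys.argv itself):
#   python batch_process_script.py <operation> [arguments...]
# All option handling lives in file_manipulators.batch_process_ops.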
07c9b0da818f0d7257d30cd94521f4d69da65a68 | 9,034 | py | Python | tests/test_validators.py | jonboland/colosseum | cbf974be54fd7f6fddbe7285704cfaf7a866c5c5 | ["BSD-3-Clause"] | 71 | 2015-04-13T09:44:14.000Z | 2019-03-24T01:03:02.000Z | 35 | 2019-05-06T15:26:09.000Z | 2022-03-28T06:30:33.000Z | 139 | 2015-05-30T18:37:43.000Z | 2019-03-27T17:14:05.000Z
from unittest import TestCase
from colosseum.shapes import Rect
from colosseum.units import px, percent
from colosseum.validators import (
ValidationError,
is_border_spacing,
is_cursor,
is_integer,
is_number,
is_percentage,
is_quote,
is_rect,
is_uri,
)
from colosseum.wrappers import Quotes
class PercentTests(TestCase):
def test_percentage(self):
percent_value = is_percentage("100%")
self.assertEqual(percent_value, 100*percent)
self.assertEqual(type(percent_value), type(percent))
with self.assertRaises(ValidationError):
is_percentage("-100%")
with self.assertRaises(ValidationError):
is_percentage("100")
with self.assertRaises(ValidationError):
is_percentage('spam')
class NumericTests(TestCase):
def test_integer(self):
self.assertEqual(is_integer('1'), 1)
validator = is_integer(min_value=0, max_value=12)
self.assertEqual(validator('1'), 1)
self.assertEqual(validator('0'), 0)
self.assertEqual(validator('12'), 12)
with self.assertRaises(ValidationError):
validator(-2)
with self.assertRaises(ValidationError):
validator(15)
with self.assertRaises(ValidationError):
validator('spam')
def test_number(self):
self.assertEqual(is_number('1'), 1.0)
validator = is_number(min_value=0, max_value=12)
self.assertEqual(validator('1.0'), 1.0)
self.assertEqual(validator('0.0'), 0.0)
self.assertEqual(validator('12.0'), 12.0)
with self.assertRaises(ValidationError):
validator(-2)
with self.assertRaises(ValidationError):
validator(15)
with self.assertRaises(ValidationError):
validator('spam')
class BorderSpacingTests(TestCase):
def test_border_spacing_valid_str_1_item(self):
self.assertEqual(is_border_spacing('1').horizontal, 1 * px)
self.assertEqual(is_border_spacing('1').vertical, 1 * px)
def test_border_spacing_valid_str_1_item_with_spaces(self):
self.assertEqual(is_border_spacing(' 1 ').horizontal, 1 * px)
self.assertEqual(is_border_spacing(' 1 ').vertical, 1 * px)
def test_border_spacing_valid_str_2_items(self):
self.assertEqual(is_border_spacing('1 2').horizontal, 1 * px)
self.assertEqual(is_border_spacing('1 2').vertical, 2 * px)
def test_border_spacing_valid_str_2_items_with_spaces(self):
self.assertEqual(is_border_spacing(' 1 2 ').horizontal, 1 * px)
self.assertEqual(is_border_spacing(' 1 2 ').vertical, 2 * px)
def test_border_spacing_valid_int_1_item(self):
self.assertEqual(is_border_spacing(1).horizontal, 1 * px, )
self.assertEqual(is_border_spacing(1).vertical, 1 * px, )
def test_border_spacing_valid_int_2_items_sequence(self):
# List
self.assertEqual(is_border_spacing([1, 2]).horizontal, 1 * px)
self.assertEqual(is_border_spacing([1, 2]).vertical, 2 * px)
# Tuple
self.assertEqual(is_border_spacing((1, 2)).horizontal, 1 * px)
self.assertEqual(is_border_spacing((1, 2)).vertical, 2 * px)
def test_border_spacing_valid_float_1_item(self):
self.assertEqual(is_border_spacing(1.0).horizontal, 1 * px)
self.assertEqual(is_border_spacing(1.0).vertical, 1 * px)
def test_border_spacing_valid_float_2_items_sequence(self):
# List
self.assertEqual(is_border_spacing([1.0, 2.0]).horizontal, 1 * px)
self.assertEqual(is_border_spacing([1.0, 2.0]).vertical, 2 * px)
# Tuple
self.assertEqual(is_border_spacing((1.0, 2.0)).horizontal, 1 * px)
self.assertEqual(is_border_spacing((1.0, 2.0)).vertical, 2 * px)
def test_border_spacing_invalid_units_str_2_item_commas(self):
with self.assertRaises(ValidationError):
is_border_spacing('1, 2')
def test_border_spacing_invalid_units_str_1_item(self):
with self.assertRaises(ValidationError):
is_border_spacing('a a')
def test_border_spacing_invalid_units_str_2_items(self):
with self.assertRaises(ValidationError):
is_border_spacing('b')
def test_border_spacing_invalid_units_sequence_2_items(self):
# List
with self.assertRaises(ValidationError):
is_border_spacing(['a', 'b'])
# Tuple
with self.assertRaises(ValidationError):
is_border_spacing(('a', 'b'))
def test_border_spacing_invalid_length_str_0_items(self):
with self.assertRaises(ValidationError):
is_border_spacing('')
def test_border_spacing_invalid_length_str_3_items(self):
with self.assertRaises(ValidationError):
is_border_spacing('1 2 3')
def test_border_spacing_invalid_length_sequence_0_items(self):
# List
with self.assertRaises(ValidationError):
is_border_spacing([])
# Tuple
with self.assertRaises(ValidationError):
is_border_spacing(())
def test_border_spacing_invalid_length_sequence_3_items(self):
# List
with self.assertRaises(ValidationError):
is_border_spacing([1, 2, 3])
# Tuple
with self.assertRaises(ValidationError):
is_border_spacing((1, 2, 3))
class RectTests(TestCase):
"""
Comprehensive rect tests are found in the parser tests.
This test checks basic cases work as expected.
"""
def test_rect_valid(self):
self.assertEqual(is_rect('rect(1px, 3px, 2px, 4px)'), Rect(1, 3, 2, 4))
def test_rect_invalid(self):
with self.assertRaises(ValidationError):
is_rect('1px, 3px 2px, 4px')
class QuotesTests(TestCase):
"""
Comprehensive quotes tests are found in the parser tests.
This test checks basic cases work as expected.
"""
def test_quote_valid(self):
self.assertEqual(is_quote("'<' '>' '{' '}'"), Quotes([('<', '>'), ('{', '}')]))
def test_quote_invalid(self):
with self.assertRaises(ValidationError):
is_quote("'<' '>' '{'")
class UriTests(TestCase):
"""Comprehensive tests are found on test_parser.py."""
def test_url_valid(self):
url = is_uri("url(some.url)")
self.assertEqual(str(url), 'url("some.url")')
url = is_uri(" url(some.url) ")
self.assertEqual(str(url), 'url("some.url")')
url = is_uri(r"url(some.\ url)")
self.assertEqual(str(url), r'url("some.\ url")')
url = is_uri("url('some.url')")
self.assertEqual(str(url), 'url("some.url")')
url = is_uri("url( 'some.url' )")
self.assertEqual(str(url), 'url("some.url")')
url = is_uri('url("some.url")')
self.assertEqual(str(url), 'url("some.url")')
url = is_uri('url( "some.url" )')
self.assertEqual(str(url), 'url("some.url")')
class CursorTests(TestCase):
"""Comprehensive tests are found on test_parser.py."""
def test_cursor_valid_1_item(self):
cursor = is_cursor("url(some.url)")
self.assertEqual(str(cursor), 'url("some.url")')
cursor = is_cursor(" url(some.url) ")
self.assertEqual(str(cursor), 'url("some.url")')
cursor = is_cursor("url('some.url')")
self.assertEqual(str(cursor), 'url("some.url")')
cursor = is_cursor("url( 'some.url' )")
self.assertEqual(str(cursor), 'url("some.url")')
cursor = is_cursor('url("some.url")')
self.assertEqual(str(cursor), 'url("some.url")')
cursor = is_cursor('url( "some.url" )')
self.assertEqual(str(cursor), 'url("some.url")')
def test_cursor_valid_2_items(self):
cursor = is_cursor("url(some.url), url(some.url2)")
self.assertEqual(str(cursor), 'url("some.url"), url("some.url2")')
cursor = is_cursor("url(some.url), auto")
self.assertEqual(str(cursor), 'url("some.url"), auto')
cursor = is_cursor(["url(some.url)", "url(some.url2)"])
self.assertEqual(str(cursor), 'url("some.url"), url("some.url2")')
cursor = is_cursor(["url(some.url)", "auto"])
self.assertEqual(str(cursor), 'url("some.url"), auto')
def test_cursor_invalid_1_item(self):
with self.assertRaises(ValidationError):
is_cursor("foobar")
with self.assertRaises(ValidationError):
is_cursor(["foobar"])
def test_cursor_invalid_2_items(self):
with self.assertRaises(ValidationError):
is_cursor("foobar, blah")
with self.assertRaises(ValidationError):
is_cursor("auto, url( something )")
with self.assertRaises(ValidationError):
is_cursor(["foobar", 'blah'])
with self.assertRaises(ValidationError):
is_cursor(["auto", "url(something)"])
with self.assertRaises(ValidationError):
is_cursor(["url(something)", "auto", "url(something)"])
07e2c061982d4a27b10d2a03773b9667626140e4 | 4,840 | py | Python | scenarios/multimemory/plot_multimemory_variant.py | jwallnoefer/multisat_qrepeater_sim_archive | 69b4c242fb760cf195871f38b3172d4dfd26c01a | ["MIT"]
import os, sys; sys.path.insert(0, os.path.abspath("."))
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# first: fixed cutoff, variable length and memories
result_path = os.path.join("results", "multimemory_variant_cutoff")
fig = plt.figure()
for num_memories in [1, 5, 10, 50, 100, 400]:
df = pd.read_csv(os.path.join(result_path, "%d_memories" % num_memories, "result.csv"), index_col=0)
x = df.index / 1000
y = df["key_per_resource"] / 2
yerr = df["key_per_resource_std"] / 2
plt.errorbar(x, y, yerr=yerr, fmt="o", label="num_memories=%d" % num_memories)
plt.yscale("log")
plt.ylim(1e-7, 1e-2)
plt.xlim(0, 300)
plt.legend()
plt.xlabel("L [km]")
plt.ylabel("key per resource")
plt.title("fixed cutoff 3 * expected_time")
plt.grid()
plt.show()
for num_memories in [1, 5, 10, 50, 100, 400]:
df = pd.read_csv(os.path.join(result_path, "%d_memories" % num_memories, "result.csv"), index_col=0)
x = df.index / 1000
y = df["key_per_time"] / 2
yerr = df["key_per_time_std"] / 2
plt.errorbar(x, y, yerr=yerr, fmt="o", label="num_memories=%d" % num_memories)
plt.yscale("log")
plt.ylim(5e-3, 1e5)
plt.xlim(0, 300)
plt.legend()
plt.xlabel("L [km]")
plt.ylabel("key per time")
plt.title("fixed cutoff 3 * expected_time")
plt.grid()
plt.show()
# second: fixed cutoff, x-axis=memories
result_path = os.path.join("results", "multimemory_variant_memories")
for length in [50e3, 100e3, 150e3, 200e3]:
df = pd.read_csv(os.path.join(result_path, "%d_km" % (length / 1000), "result.csv"), index_col=0)
x = df.index / 1000
y = df["key_per_resource"] / 2
yerr = df["key_per_resource_std"] / 2
plt.errorbar(x, y, yerr=yerr, fmt="o", label="length=%d km" % (length / 1000))
plt.yscale("log")
plt.ylim(1e-6, 1e-3)
# plt.xlim(0, 300)
plt.legend()
plt.xlabel("number of memories")
plt.ylabel("key per resource")
plt.title("fixed cutoff 3 * expected_time")
plt.grid()
plt.show()
for length in [50e3, 100e3, 150e3, 200e3]:
df = pd.read_csv(os.path.join(result_path, "%d_km" % (length / 1000), "result.csv"), index_col=0)
x = df.index / 1000
y = df["key_per_time"] / 2
yerr = df["key_per_time_std"] / 2
plt.errorbar(x, y, yerr=yerr, fmt="o", label="length=%d km" % (length / 1000))
plt.yscale("log")
plt.ylim(1e-3, 1e4)
# plt.xlim(0, 300)
plt.legend()
plt.xlabel("number of memories")
plt.ylabel("key per time")
plt.title("fixed cutoff 3 * expected_time")
plt.grid()
plt.show()
# third: fixed length, x-axis=cutoff_times
result_path = os.path.join("results", "multimemory_variant_by_cutoff")
for num_memories in [1, 5, 10, 50, 100, 400]:
df = pd.read_csv(os.path.join(result_path, "%d_memories" % num_memories, "result.csv"), index_col=0)
x = df.index
y = df["key_per_resource"] / 2
yerr = df["key_per_resource_std"] / 2
plt.errorbar(x, y, yerr=yerr, fmt="o", label="num_memories=%d" % num_memories)
plt.yscale("log")
plt.ylim(1e-6, 1e-4)
# plt.xlim(0, 300)
plt.legend()
plt.xlabel("cutoff_time")
plt.ylabel("key per resource")
plt.title("fixed length 150km")
plt.grid()
plt.show()
for num_memories in [1, 5, 10, 50, 100, 400]:
df = pd.read_csv(os.path.join(result_path, "%d_memories" % num_memories, "result.csv"), index_col=0)
x = df.index
y = df["key_per_time"] / 2
yerr = df["key_per_time_std"] / 2
plt.errorbar(x, y, yerr=yerr, fmt="o", label="num_memories=%d" % num_memories)
plt.yscale("log")
plt.ylim(1e-3, 1e2)
# plt.xlim(0, 300)
plt.legend()
plt.xlabel("cutoff_time")
plt.ylabel("key per time")
plt.title("fixed length 150km")
plt.grid()
plt.show()
# fourth: fixed memories, x-axis=length
result_path = os.path.join("results", "multimemory_variant_fixed_mem")
for cutoff_multiplier in [0.001, 0.005, 0.010, 0.020, 0.030, 0.050, 0.100, 0.250, 0.500]:
df = pd.read_csv(os.path.join(result_path, "%.3f_cutoff" % cutoff_multiplier, "result.csv"), index_col=0)
x = df.index / 1000
y = df["key_per_resource"] / 2
yerr = df["key_per_resource_std"] / 2
plt.errorbar(x, y, yerr=yerr, fmt="o", label="cutoff_multiplier=%.3f" % cutoff_multiplier)
plt.yscale("log")
plt.ylim(1e-8, 1e-2)
# plt.xlim(0, 300)
plt.legend()
plt.xlabel("L [km]")
plt.ylabel("key per resource")
plt.title("fixed 400 memories")
plt.grid()
plt.show()
for cutoff_multiplier in [0.001, 0.005, 0.010, 0.020, 0.030, 0.050, 0.100, 0.250, 0.500]:
df = pd.read_csv(os.path.join(result_path, "%.3f_cutoff" % cutoff_multiplier, "result.csv"), index_col=0)
x = df.index / 1000
y = df["key_per_time"] / 2
yerr = df["key_per_time_std"] / 2
plt.errorbar(x, y, yerr=yerr, fmt="o", label="cutoff_multiplier=%.3f" % cutoff_multiplier)
plt.yscale("log")
# plt.ylim(1e-3, 1e2)
# plt.xlim(0, 300)
plt.legend()
plt.xlabel("L [km]")
plt.ylabel("key per time")
plt.title("fixed 400 memories")
plt.grid()
plt.show()
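# Each of the eight blocks above repeats the same read/plot/decorate cycle; a
# helper like this sketch (hypothetical refactor, not in the original script)
# would express the pattern once:
def plot_key_rate(result_path, runs, column, label_fmt, xlabel, title,
                  x_scale=1, ylim=None):
    # runs: iterable of (label_value, subdirectory) pairs
    for value, subdir in runs:
        df = pd.read_csv(os.path.join(result_path, subdir, "result.csv"),
                         index_col=0)
        plt.errorbar(df.index / x_scale, df[column] / 2,
                     yerr=df[column + "_std"] / 2,
                     fmt="o", label=label_fmt % value)
    plt.yscale("log")
    if ylim is not None:
        plt.ylim(*ylim)
    plt.legend()
    plt.xlabel(xlabel)
    plt.ylabel(column.replace("_", " "))
    plt.title(title)
    plt.grid()
    plt.show()
# e.g.: plot_key_rate(result_path, [(n, "%d_memories" % n) for n in (1, 5)],
#                     "key_per_resource", "num_memories=%d", "L [km]",
#                     "fixed cutoff 3 * expected_time", x_scale=1000)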
6afda9e724ba85e9cd172da0fe05a6480d5d39c1 | 28 | py | Python | Packs/Whois/Integrations/Whois/demistomock_params.py | nicholasericksen/xsoar | 1cf46956bf37f0fea5a72920aee4d6af3f0c6380 | ["MIT"] | 1 | 2021-04-20T07:10:06.000Z | 2021-04-20T07:10:06.000Z
def params():
return {}
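# Test stub: stands in for the real demisto params() call and returns an empty
# configuration (a reading of the demistomock naming convention; the real
# integration receives its parameters from the XSOAR platform).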
9c181e90deb16591584793eb5b453ba2326cc8a0 | 24,731 | py | Python | tests/components/bayesian/test_binary_sensor.py | basicpail/core | 5cc54618c5af3f75c08314bf2375cc7ac40d2b7e | ["Apache-2.0"] | 11 | 2018-02-16T15:35:47.000Z | 2020-01-14T15:20:00.000Z
tests/components/bayesian/test_binary_sensor.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | ["Apache-2.0"] | 87 | 2020-07-06T22:22:54.000Z | 2022-03-31T06:01:46.000Z
tests/components/bayesian/test_binary_sensor.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | ["Apache-2.0"] | 11 | 2020-12-16T13:48:14.000Z | 2022-02-01T00:28:05.000Z
"""The test for the bayesian sensor platform."""
import json
from os import path
from unittest.mock import patch
from homeassistant import config as hass_config
from homeassistant.components.bayesian import DOMAIN, binary_sensor as bayesian
from homeassistant.components.homeassistant import (
DOMAIN as HA_DOMAIN,
SERVICE_UPDATE_ENTITY,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_RELOAD,
STATE_OFF,
STATE_ON,
STATE_UNKNOWN,
)
from homeassistant.core import Context, callback
from homeassistant.setup import async_setup_component
async def test_load_values_when_added_to_hass(hass):
"""Test that sensor initializes with observations of relevant entities."""
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "state",
"entity_id": "sensor.test_monitored",
"to_state": "off",
"prob_given_true": 0.8,
"prob_given_false": 0.4,
}
],
"prior": 0.2,
"probability_threshold": 0.32,
}
}
hass.states.async_set("sensor.test_monitored", "off")
await hass.async_block_till_done()
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_binary")
assert state.attributes.get("observations")[0]["prob_given_true"] == 0.8
assert state.attributes.get("observations")[0]["prob_given_false"] == 0.4
async def test_unknown_state_does_not_influence_probability(hass):
"""Test that an unknown state does not change the output probability."""
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "state",
"entity_id": "sensor.test_monitored",
"to_state": "off",
"prob_given_true": 0.8,
"prob_given_false": 0.4,
}
],
"prior": 0.2,
"probability_threshold": 0.32,
}
}
hass.states.async_set("sensor.test_monitored", STATE_UNKNOWN)
await hass.async_block_till_done()
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_binary")
assert state.attributes.get("observations") == []
async def test_sensor_numeric_state(hass):
"""Test sensor on numeric state platform observations."""
config = {
"binary_sensor": {
"platform": "bayesian",
"name": "Test_Binary",
"observations": [
{
"platform": "numeric_state",
"entity_id": "sensor.test_monitored",
"below": 10,
"above": 5,
"prob_given_true": 0.6,
},
{
"platform": "numeric_state",
"entity_id": "sensor.test_monitored1",
"below": 7,
"above": 5,
"prob_given_true": 0.9,
"prob_given_false": 0.1,
},
],
"prior": 0.2,
}
}
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", 4)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_binary")
assert [] == state.attributes.get("observations")
assert state.attributes.get("probability") == 0.2
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", 6)
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", 4)
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", 6)
hass.states.async_set("sensor.test_monitored1", 6)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_binary")
assert state.attributes.get("observations")[0]["prob_given_true"] == 0.6
assert state.attributes.get("observations")[1]["prob_given_true"] == 0.9
assert state.attributes.get("observations")[1]["prob_given_false"] == 0.1
assert round(abs(0.77 - state.attributes.get("probability")), 7) == 0
assert state.state == "on"
hass.states.async_set("sensor.test_monitored", 6)
hass.states.async_set("sensor.test_monitored1", 0)
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", 4)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_binary")
assert state.attributes.get("probability") == 0.2
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", 15)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_binary")
assert state.state == "off"
async def test_sensor_state(hass):
"""Test sensor on state platform observations."""
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "state",
"entity_id": "sensor.test_monitored",
"to_state": "off",
"prob_given_true": 0.8,
"prob_given_false": 0.4,
}
],
"prior": 0.2,
"probability_threshold": 0.32,
}
}
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", "on")
state = hass.states.get("binary_sensor.test_binary")
assert [] == state.attributes.get("observations")
assert state.attributes.get("probability") == 0.2
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", "off")
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", "on")
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", "off")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_binary")
assert state.attributes.get("observations")[0]["prob_given_true"] == 0.8
assert state.attributes.get("observations")[0]["prob_given_false"] == 0.4
assert round(abs(0.33 - state.attributes.get("probability")), 7) == 0
assert state.state == "on"
hass.states.async_set("sensor.test_monitored", "off")
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", "on")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_binary")
assert round(abs(0.2 - state.attributes.get("probability")), 7) == 0
assert state.state == "off"
async def test_sensor_value_template(hass):
"""Test sensor on template platform observations."""
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "template",
"value_template": "{{states('sensor.test_monitored') == 'off'}}",
"prob_given_true": 0.8,
"prob_given_false": 0.4,
}
],
"prior": 0.2,
"probability_threshold": 0.32,
}
}
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", "on")
state = hass.states.get("binary_sensor.test_binary")
assert [] == state.attributes.get("observations")
assert state.attributes.get("probability") == 0.2
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", "off")
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", "on")
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", "off")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_binary")
assert state.attributes.get("observations")[0]["prob_given_true"] == 0.8
assert state.attributes.get("observations")[0]["prob_given_false"] == 0.4
assert round(abs(0.33 - state.attributes.get("probability")), 7) == 0
assert state.state == "on"
hass.states.async_set("sensor.test_monitored", "off")
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", "on")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_binary")
assert round(abs(0.2 - state.attributes.get("probability")), 7) == 0
assert state.state == "off"
async def test_threshold(hass):
"""Test sensor on probability threshold limits."""
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "state",
"entity_id": "sensor.test_monitored",
"to_state": "on",
"prob_given_true": 1.0,
}
],
"prior": 0.5,
"probability_threshold": 1.0,
}
}
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", "on")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_binary")
assert round(abs(1.0 - state.attributes.get("probability")), 7) == 0
assert state.state == "on"
async def test_multiple_observations(hass):
"""Test sensor with multiple observations of same entity."""
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "state",
"entity_id": "sensor.test_monitored",
"to_state": "blue",
"prob_given_true": 0.8,
"prob_given_false": 0.4,
},
{
"platform": "state",
"entity_id": "sensor.test_monitored",
"to_state": "red",
"prob_given_true": 0.2,
"prob_given_false": 0.4,
},
],
"prior": 0.2,
"probability_threshold": 0.32,
}
}
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", "off")
state = hass.states.get("binary_sensor.test_binary")
for key, attrs in state.attributes.items():
json.dumps(attrs)
assert [] == state.attributes.get("observations")
assert state.attributes.get("probability") == 0.2
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", "blue")
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", "off")
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", "blue")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_binary")
assert state.attributes.get("observations")[0]["prob_given_true"] == 0.8
assert state.attributes.get("observations")[0]["prob_given_false"] == 0.4
assert round(abs(0.33 - state.attributes.get("probability")), 7) == 0
assert state.state == "on"
hass.states.async_set("sensor.test_monitored", "blue")
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", "red")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_binary")
assert round(abs(0.11 - state.attributes.get("probability")), 7) == 0
assert state.state == "off"
async def test_probability_updates(hass):
"""Test probability update function."""
prob_given_true = [0.3, 0.6, 0.8]
prob_given_false = [0.7, 0.4, 0.2]
prior = 0.5
for pt, pf in zip(prob_given_true, prob_given_false):
prior = bayesian.update_probability(prior, pt, pf)
assert round(abs(0.720000 - prior), 7) == 0
prob_given_true = [0.8, 0.3, 0.9]
prob_given_false = [0.6, 0.4, 0.2]
prior = 0.7
for pt, pf in zip(prob_given_true, prob_given_false):
prior = bayesian.update_probability(prior, pt, pf)
assert round(abs(0.9130434782608695 - prior), 7) == 0
async def test_observed_entities(hass):
"""Test sensor on observed entities."""
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "state",
"entity_id": "sensor.test_monitored",
"to_state": "off",
"prob_given_true": 0.9,
"prob_given_false": 0.4,
},
{
"platform": "template",
"value_template": "{{is_state('sensor.test_monitored1','on') and is_state('sensor.test_monitored','off')}}",
"prob_given_true": 0.9,
},
],
"prior": 0.2,
"probability_threshold": 0.32,
}
}
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", "on")
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored1", "off")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_binary")
assert [] == state.attributes.get("occurred_observation_entities")
hass.states.async_set("sensor.test_monitored", "off")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_binary")
assert ["sensor.test_monitored"] == state.attributes.get(
"occurred_observation_entities"
)
hass.states.async_set("sensor.test_monitored1", "on")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_binary")
assert ["sensor.test_monitored", "sensor.test_monitored1"] == sorted(
state.attributes.get("occurred_observation_entities")
)
async def test_state_attributes_are_serializable(hass):
"""Test sensor on observed entities."""
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "state",
"entity_id": "sensor.test_monitored",
"to_state": "off",
"prob_given_true": 0.9,
"prob_given_false": 0.4,
},
{
"platform": "template",
"value_template": "{{is_state('sensor.test_monitored1','on') and is_state('sensor.test_monitored','off')}}",
"prob_given_true": 0.9,
},
],
"prior": 0.2,
"probability_threshold": 0.32,
}
}
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", "on")
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored1", "off")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_binary")
assert [] == state.attributes.get("occurred_observation_entities")
hass.states.async_set("sensor.test_monitored", "off")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_binary")
assert ["sensor.test_monitored"] == state.attributes.get(
"occurred_observation_entities"
)
hass.states.async_set("sensor.test_monitored1", "on")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_binary")
assert ["sensor.test_monitored", "sensor.test_monitored1"] == sorted(
state.attributes.get("occurred_observation_entities")
)
for attrs in state.attributes.values():
json.dumps(attrs)
async def test_template_error(hass, caplog):
"""Test sensor with template error."""
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "template",
"value_template": "{{ xyz + 1 }}",
"prob_given_true": 0.9,
},
],
"prior": 0.2,
"probability_threshold": 0.32,
}
}
await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.test_binary").state == "off"
assert "TemplateError" in caplog.text
assert "xyz" in caplog.text
async def test_update_request_with_template(hass):
"""Test sensor on template platform observations that gets an update request."""
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "template",
"value_template": "{{states('sensor.test_monitored') == 'off'}}",
"prob_given_true": 0.8,
"prob_given_false": 0.4,
}
],
"prior": 0.2,
"probability_threshold": 0.32,
}
}
await async_setup_component(hass, "binary_sensor", config)
await async_setup_component(hass, HA_DOMAIN, {})
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.test_binary").state == "off"
await hass.services.async_call(
HA_DOMAIN,
SERVICE_UPDATE_ENTITY,
{ATTR_ENTITY_ID: "binary_sensor.test_binary"},
blocking=True,
)
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.test_binary").state == "off"
async def test_update_request_without_template(hass):
"""Test sensor on template platform observations that gets an update request."""
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "state",
"entity_id": "sensor.test_monitored",
"to_state": "off",
"prob_given_true": 0.9,
"prob_given_false": 0.4,
},
],
"prior": 0.2,
"probability_threshold": 0.32,
}
}
await async_setup_component(hass, "binary_sensor", config)
await async_setup_component(hass, HA_DOMAIN, {})
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", "on")
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.test_binary").state == "off"
await hass.services.async_call(
HA_DOMAIN,
SERVICE_UPDATE_ENTITY,
{ATTR_ENTITY_ID: "binary_sensor.test_binary"},
blocking=True,
)
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.test_binary").state == "off"
async def test_monitored_sensor_goes_away(hass):
"""Test sensor on template platform observations that goes away."""
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "state",
"entity_id": "sensor.test_monitored",
"to_state": "on",
"prob_given_true": 0.9,
"prob_given_false": 0.4,
},
],
"prior": 0.2,
"probability_threshold": 0.32,
}
}
await async_setup_component(hass, "binary_sensor", config)
await async_setup_component(hass, HA_DOMAIN, {})
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", "on")
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.test_binary").state == "on"
hass.states.async_remove("sensor.test_monitored")
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.test_binary").state == "on"
async def test_reload(hass):
"""Verify we can reload bayesian sensors."""
config = {
"binary_sensor": {
"name": "test",
"platform": "bayesian",
"observations": [
{
"platform": "state",
"entity_id": "sensor.test_monitored",
"to_state": "on",
"prob_given_true": 0.9,
"prob_given_false": 0.4,
},
],
"prior": 0.2,
"probability_threshold": 0.32,
}
}
await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 1
assert hass.states.get("binary_sensor.test")
yaml_path = path.join(
_get_fixtures_base_path(),
"fixtures",
"bayesian/configuration.yaml",
)
with patch.object(hass_config, "YAML_CONFIG_FILE", yaml_path):
await hass.services.async_call(
DOMAIN,
SERVICE_RELOAD,
{},
blocking=True,
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 1
assert hass.states.get("binary_sensor.test") is None
assert hass.states.get("binary_sensor.test2")
def _get_fixtures_base_path():
return path.dirname(path.dirname(path.dirname(__file__)))
async def test_template_triggers(hass):
"""Test sensor with template triggers."""
hass.states.async_set("input_boolean.test", STATE_OFF)
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "template",
"value_template": "{{ states.input_boolean.test.state }}",
"prob_given_true": 1999.9,
},
],
"prior": 0.2,
"probability_threshold": 0.32,
}
}
await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.test_binary").state == STATE_OFF
events = []
hass.helpers.event.async_track_state_change_event(
"binary_sensor.test_binary", callback(lambda event: events.append(event))
)
context = Context()
hass.states.async_set("input_boolean.test", STATE_ON, context=context)
await hass.async_block_till_done()
await hass.async_block_till_done()
assert events[0].context == context
async def test_state_triggers(hass):
"""Test sensor with state triggers."""
hass.states.async_set("sensor.test_monitored", STATE_OFF)
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "state",
"entity_id": "sensor.test_monitored",
"to_state": "off",
"prob_given_true": 999.9,
"prob_given_false": 999.4,
},
],
"prior": 0.2,
"probability_threshold": 0.32,
}
}
await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.test_binary").state == STATE_OFF
events = []
hass.helpers.event.async_track_state_change_event(
"binary_sensor.test_binary", callback(lambda event: events.append(event))
)
context = Context()
hass.states.async_set("sensor.test_monitored", STATE_ON, context=context)
await hass.async_block_till_done()
await hass.async_block_till_done()
assert events[0].context == context
[row metadata: quality-signal columns for the preceding record omitted]

--- record: cuda.py | baophuc27/answer-generation @ 36ab9f84f8d4df90abd2bd0255a5229afbd65892 | license: MIT | 78 bytes | Python | 3 stars (2021-03-25 to 2021-06-14) ---
import torch
print(torch.cuda.is_available())
print(torch.cuda.device_count())
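# A small follow-on sketch (annotation, not part of the original file):
# selecting a device based on the checks above, using only standard torch APIs.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"using device: {device}")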
[row metadata: quality-signal columns omitted]

--- record: graphtheory/coloring/edgecolorcs.py | mashal02/graphs-dict @ 39917d8a7f3bdcd5d95f3549ca054d16ba535e90 | license: BSD-3-Clause | 6,080 bytes | Python | 36 stars (2015-09-20 to 2021-09-20) | 6 issues | 9 forks ---
#!/usr/bin/python
try:
from Queue import Queue
except ImportError: # Python 3
from queue import Queue
xrange = range
class ConnectedSequentialEdgeColoring1:
"""Find a connected sequential (CS) edge coloring.
Attributes
----------
graph : input undirected graph or multigraph
color : dict with edges (values are colors)
parent : dict (BFS tree)
m : number (the number of edges)
saturation : dict with nodes (values are sets of adjacent node colors)
Notes
-----
Colors are 0, 1, 2, ...
edge.source < edge.target for any edge in color.
"""
def __init__(self, graph):
"""The algorithm initialization."""
if graph.is_directed():
raise ValueError("the graph is directed")
self.graph = graph
self.parent = dict()
self.color = dict()
self.m = 0 # graph.e() is slow
for edge in self.graph.iteredges():
if edge.source == edge.target:
raise ValueError("a loop detected")
else:
self.color[edge] = None # edge.source < edge.target
self.m += 1
if len(self.color) < self.m:
raise ValueError("edges are not unique")
self.saturation = dict((node, set()) for node in self.graph.iternodes())
def run(self, source=None):
"""Using BFS to color edges.."""
if source is not None: # only one connected component
self._visit(source)
else:
for node in self.graph.iternodes():
if node not in self.parent:
self._visit(node)
def _visit(self, node):
"""Explore the connected component."""
Q = Queue()
self.parent[node] = None # before Q.put
Q.put(node)
while not Q.empty():
source = Q.get()
for edge in self.graph.iteroutedges(source):
if edge.target not in self.parent:
self.parent[edge.target] = source # before Q.put
Q.put(edge.target)
if edge.source > edge.target:
edge = ~edge
if self.color[edge] is None:
self._greedy_color_with_saturation(edge)
def _greedy_color_with_saturation(self, edge):
"""Give edge the smallest possible color."""
for c in xrange(self.m):
if (c in self.saturation[edge.source] or
c in self.saturation[edge.target]):
continue # color is used
else: # color is free
self.color[edge] = c
self.saturation[edge.source].add(c)
self.saturation[edge.target].add(c)
break
return c
def _get_color(self, edge):
"""Get color."""
if edge.source > edge.target:
edge = ~edge
return self.color[edge]
def show_colors(self):
"""Show edge coloring (undirected graphs)."""
L = []
for source in self.graph.iternodes():
L.append("{} : ".format(source))
for edge in self.graph.iteroutedges(source):
# It should work for multigraphs.
c = self._get_color(edge)
L.append("{}({}) ".format(edge.target, c))
L.append("\n")
print("".join(L))
class ConnectedSequentialEdgeColoring2:
"""Find a connected sequential (CS) edge coloring.
Attributes
----------
graph : input undirected graph or multigraph
color : dict with edges (values are colors)
m : number (the number of edges)
saturation : dict with nodes (values are sets of adjacent node colors)
Notes
-----
Colors are 0, 1, 2, ...
edge.source < edge.target for any edge in color.
"""
def __init__(self, graph):
"""The algorithm initialization."""
if graph.is_directed():
raise ValueError("the graph is directed")
self.graph = graph
self.color = dict()
self.m = 0 # graph.e() is slow
for edge in self.graph.iteredges():
if edge.source == edge.target:
raise ValueError("a loop detected")
else:
self.color[edge] = None # edge.source < edge.target
self.m += 1
if len(self.color) < self.m:
raise ValueError("edges are not unique")
self.saturation = dict((node, set()) for node in self.graph.iternodes())
def run(self, source=None):
"""Using BFS to color edges."""
if source is not None: # only one connected component
start_edge = next(self.graph.iteroutedges(source))
else:
start_edge = next(self.graph.iteredges())
for edge in self.graph.iteredges_connected(start_edge):
self._greedy_color_with_saturation(edge)
def _greedy_color_with_saturation(self, edge):
"""Give edge the smallest possible color."""
for c in xrange(self.m):
if (c in self.saturation[edge.source] or
c in self.saturation[edge.target]):
continue # color is used
else: # color is free
self.color[edge] = c
self.saturation[edge.source].add(c)
self.saturation[edge.target].add(c)
break
return c
def _get_color(self, edge):
"""Get color."""
if edge.source > edge.target:
edge = ~edge
return self.color[edge]
def show_colors(self):
"""Show edge coloring (undirected graphs)."""
L = []
for source in self.graph.iternodes():
L.append("{} : ".format(source))
for edge in self.graph.iteroutedges(source):
# It should work for multigraphs.
c = self._get_color(edge)
L.append("{}({}) ".format(edge.target, c))
L.append("\n")
print("".join(L))
ConnectedSequentialEdgeColoring = ConnectedSequentialEdgeColoring2
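# A usage sketch (annotation, not original source). It assumes the Graph and
# Edge classes from this package's graphtheory.structures modules; adjust the
# imports if the package layout differs.
#
# from graphtheory.structures.edges import Edge
# from graphtheory.structures.graphs import Graph
#
# G = Graph(4)
# for node in (0, 1, 2, 3):
#     G.add_node(node)
# for edge in (Edge(0, 1), Edge(1, 2), Edge(2, 3)):
#     G.add_edge(edge)
# algorithm = ConnectedSequentialEdgeColoring(G)
# algorithm.run(source=0)
# algorithm.show_colors()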
# EOF
[row metadata: quality-signal columns omitted]

--- record: tests/unittests/import_export_tests.py | williamcorsel/fiftyone @ 22e34e91deb1d2e2fe6316ec81714e0c55015523 | license: Apache-2.0 | 77,272 bytes | Python ---
"""
FiftyOne import/export-related unit tests.
| Copyright 2017-2022, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
import os
import random
import string
import unittest
import cv2
import numpy as np
import pytest
import eta.core.image as etai
import eta.core.utils as etau
import eta.core.video as etav
import fiftyone as fo
import fiftyone.utils.coco as fouc
import fiftyone.utils.yolo as fouy
from decorators import drop_datasets
skipwindows = pytest.mark.skipif(
os.name == "nt", reason="Windows hangs in workflows, fix me"
)
class ImageDatasetTests(unittest.TestCase):
def setUp(self):
temp_dir = etau.TempDir()
tmp_dir = temp_dir.__enter__()
ref_image_path = os.path.join(tmp_dir, "_ref_image.jpg")
images_dir = os.path.join(tmp_dir, "_images")
img = np.random.randint(255, size=(480, 640, 3), dtype=np.uint8)
etai.write(img, ref_image_path)
self._temp_dir = temp_dir
self._tmp_dir = tmp_dir
self._ref_image_path = ref_image_path
self.images_dir = images_dir
def tearDown(self):
self._temp_dir.__exit__()
def _new_image(self, name=None):
if name is None:
name = self._new_name()
filepath = os.path.join(
self.images_dir,
name + os.path.splitext(self._ref_image_path)[1],
)
etau.copy_file(self._ref_image_path, filepath)
return filepath
def _new_name(self):
return "".join(
random.choice(string.ascii_lowercase + string.digits)
for _ in range(24)
)
def _new_dir(self):
return os.path.join(self._tmp_dir, self._new_name())
class DuplicateImageExportTests(ImageDatasetTests):
@skipwindows
@drop_datasets
def test_duplicate_images(self):
sample = fo.Sample(
filepath=self._new_image(),
cls=fo.Classification(label="sunny"),
det=fo.Detections(
detections=[
fo.Detection(label="cat", bounding_box=[0, 0, 1, 1])
]
),
)
# This dataset contains two samples with the same `filepath`
dataset = fo.Dataset()
dataset.add_samples([sample, sample])
export_dir = self._new_dir()
#
# In general, duplicate copies of the same images are NOT created
#
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.ImageDirectory,
overwrite=True,
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir, dataset_type=fo.types.ImageDirectory
)
# We didn't create a duplicate image during export, so there's only
# one image to import here
self.assertEqual(len(dataset2), 1)
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.FiftyOneDataset,
overwrite=True,
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir, dataset_type=fo.types.FiftyOneDataset
)
self.assertEqual(len(dataset2), 2)
# Use COCODetectionDataset as a representative for other labeled image
# dataset types
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.COCODetectionDataset,
overwrite=True,
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir, dataset_type=fo.types.COCODetectionDataset
)
self.assertEqual(len(dataset2), 2)
#
# The one exception is labeled dataset types where the location of the
# exported media encodes the label (the same image may carry different
# labels in different samples). In this case, duplicate images ARE
# exported
#
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.ImageClassificationDirectoryTree,
overwrite=True,
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.ImageClassificationDirectoryTree,
)
self.assertEqual(len(dataset2), 2)
class ImageExportCoercionTests(ImageDatasetTests):
@drop_datasets
def test_field_inference(self):
sample = fo.Sample(
filepath=self._new_image(),
ground_truth=fo.Detections(
detections=[
fo.Detection(
label="cat",
bounding_box=[0.1, 0.1, 0.4, 0.4],
),
fo.Detection(
label="dog",
bounding_box=[0.5, 0.5, 0.4, 0.4],
),
]
),
)
dataset = fo.Dataset()
dataset.add_sample(sample)
#
# A field of appropriate type is inferred
#
export_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.COCODetectionDataset,
)
#
# Multiple compatible field types exist, but the first one is still
# chosen and used
#
dataset.clone_sample_field("ground_truth", "predictions")
export_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.COCODetectionDataset,
)
@drop_datasets
def test_patch_exports(self):
sample = fo.Sample(
filepath=self._new_image(),
ground_truth=fo.Detections(
detections=[
fo.Detection(
label="cat",
bounding_box=[0.1, 0.1, 0.4, 0.4],
),
fo.Detection(
label="dog",
bounding_box=[0.5, 0.5, 0.4, 0.4],
),
]
),
)
dataset = fo.Dataset()
dataset.add_sample(sample)
#
# No label field is provided; only images are exported
#
export_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.ImageDirectory,
)
#
# A detections field is provided, so the object patches are exported as
# a directory of images
#
export_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.ImageDirectory,
label_field="ground_truth",
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.ImageDirectory,
)
self.assertEqual(
len(dataset2), dataset.count("ground_truth.detections")
)
#
# A detections field is provided, so the object patches are exported as
# an image classification directory tree
#
export_dir3 = self._new_dir()
dataset.export(
export_dir=export_dir3,
dataset_type=fo.types.ImageClassificationDirectoryTree,
label_field="ground_truth",
)
dataset3 = fo.Dataset.from_dir(
dataset_dir=export_dir3,
dataset_type=fo.types.ImageClassificationDirectoryTree,
)
self.assertEqual(
len(dataset3), dataset.count("ground_truth.detections")
)
@drop_datasets
def test_single_label_to_lists(self):
sample = fo.Sample(
filepath=self._new_image(),
ground_truth=fo.Detection(
label="cat",
bounding_box=[0.1, 0.1, 0.4, 0.4],
),
)
dataset = fo.Dataset()
dataset.add_sample(sample)
#
# The `ground_truth` field has type `Detection`, but COCO format
# expects `Detections`, so the labels are automatically coerced to
# single-label lists
#
export_dir4 = self._new_dir()
dataset.export(
export_dir=export_dir4,
dataset_type=fo.types.COCODetectionDataset,
label_field="ground_truth",
)
@drop_datasets
def test_classification_as_detections(self):
sample = fo.Sample(
filepath=self._new_image(),
animal=fo.Classification(label="cat"),
)
dataset = fo.Dataset()
dataset.add_sample(sample)
#
# The `animal` field is exported as detections that span entire images
#
export_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.COCODetectionDataset,
label_field="animal",
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.COCODetectionDataset,
label_field="animal",
)
bounding_box = dataset2.first().animal.detections[0].bounding_box
self.assertTrue(np.allclose(bounding_box, [0, 0, 1, 1]))
class UnlabeledImageDatasetTests(ImageDatasetTests):
def _make_dataset(self):
samples = [fo.Sample(filepath=self._new_image()) for _ in range(5)]
dataset = fo.Dataset()
dataset.add_samples(samples)
return dataset
@drop_datasets
def test_image_directory(self):
dataset = self._make_dataset()
export_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.ImageDirectory,
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.ImageDirectory,
)
self.assertEqual(len(dataset), len(dataset2))
class ImageClassificationDatasetTests(ImageDatasetTests):
def _make_dataset(self):
samples = [
fo.Sample(
filepath=self._new_image(),
predictions=fo.Classification(label="cat", confidence=0.9),
),
fo.Sample(
filepath=self._new_image(),
predictions=fo.Classification(label="dog", confidence=0.95),
),
fo.Sample(filepath=self._new_image()),
]
dataset = fo.Dataset()
dataset.add_samples(samples)
return dataset
@drop_datasets
def test_fiftyone_image_classification_dataset(self):
dataset = self._make_dataset()
# Standard format
export_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.FiftyOneImageClassificationDataset,
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.FiftyOneImageClassificationDataset,
label_field="predictions",
)
self.assertEqual(len(dataset), len(dataset2))
self.assertEqual(
dataset.count("predictions"), dataset2.count("predictions")
)
# Include confidence
export_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.FiftyOneImageClassificationDataset,
include_confidence=True,
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.FiftyOneImageClassificationDataset,
label_field="predictions",
)
confs = dataset.values("predictions.confidence", missing_value=-1)
confs2 = dataset2.values("predictions.confidence", missing_value=-1)
self.assertEqual(len(dataset), len(dataset2))
# sorting is necessary because sample order is arbitrary
self.assertTrue(np.allclose(sorted(confs), sorted(confs2)))
# Labels-only
data_path = self.images_dir
labels_path = os.path.join(self._new_dir(), "labels.json")
dataset.export(
dataset_type=fo.types.FiftyOneImageClassificationDataset,
labels_path=labels_path,
)
dataset2 = fo.Dataset.from_dir(
dataset_type=fo.types.FiftyOneImageClassificationDataset,
data_path=data_path,
labels_path=labels_path,
label_field="predictions",
)
self.assertEqual(len(dataset), len(dataset2))
self.assertSetEqual(
set(dataset.values("filepath")),
set(dataset2.values("filepath")),
)
self.assertEqual(
dataset.count("predictions"),
dataset2.count("predictions"),
)
@drop_datasets
def test_image_classification_directory_tree(self):
dataset = self._make_dataset()
# Standard format
export_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.ImageClassificationDirectoryTree,
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.ImageClassificationDirectoryTree,
label_field="predictions",
)
self.assertEqual(len(dataset), len(dataset2))
self.assertEqual(
dataset.count("predictions"), dataset2.count("predictions")
)
@drop_datasets
def test_tf_image_classification_dataset(self):
dataset = self._make_dataset()
# Standard format
export_dir = self._new_dir()
images_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.TFImageClassificationDataset,
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.TFImageClassificationDataset,
images_dir=images_dir,
label_field="predictions",
)
self.assertEqual(len(dataset), len(dataset2))
self.assertEqual(
dataset.count("predictions"), dataset2.count("predictions")
)
# Direct records path w/ sharding
tf_records_path = os.path.join(self._new_dir(), "tf.records")
tf_records_patt = tf_records_path + "-*-of-*"
images_dir = self._new_dir()
dataset.export(
dataset_type=fo.types.TFImageClassificationDataset,
tf_records_path=tf_records_path,
num_shards=2,
)
dataset2 = fo.Dataset.from_dir(
dataset_type=fo.types.TFImageClassificationDataset,
tf_records_path=tf_records_patt,
images_dir=images_dir,
label_field="predictions",
)
self.assertEqual(len(dataset), len(dataset2))
self.assertEqual(
dataset.count("predictions"), dataset2.count("predictions")
)
class ImageChannelsDatasetTests(ImageDatasetTests):
def _make_dataset(self):
samples = [
fo.Sample(
filepath=self._new_image(),
predictions=fo.Classification(label="cat", confidence=0.9),
),
fo.Sample(
filepath=self._new_image(),
predictions=fo.Classification(label="dog", confidence=0.95),
),
]
dataset = fo.Dataset()
dataset.add_samples(samples)
return dataset
@skipwindows
@drop_datasets
def test_tf_image_classification_channels(self):
orig_dataset = self._make_dataset()
# Export grayscale images
export_dir1 = self._new_dir()
for idx, sample in enumerate(orig_dataset, 1):
label = sample.predictions.label
outpath = os.path.join(export_dir1, label, "%06d.png" % idx)
# pylint: disable=no-member
img = etai.read(sample.filepath, flag=cv2.IMREAD_GRAYSCALE)
etai.write(img, outpath)
gray_dataset1 = fo.Dataset.from_dir(
dataset_dir=export_dir1,
dataset_type=fo.types.ImageClassificationDirectoryTree,
)
gray_dataset1.compute_metadata()
self.assertEqual(gray_dataset1.first().metadata.num_channels, 1)
export_dir2 = self._new_dir()
# Export grayscale
gray_dataset1.export(
export_dir=export_dir2,
dataset_type=fo.types.TFImageClassificationDataset,
overwrite=True,
)
# Import grayscale
gray_dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir2,
dataset_type=fo.types.TFImageClassificationDataset,
images_dir=os.path.join(export_dir2, "images-gray"),
)
gray_dataset2.compute_metadata()
self.assertEqual(gray_dataset2.first().metadata.num_channels, 1)
# Force RGB at import-time
rgb_dataset1 = fo.Dataset.from_dir(
dataset_dir=export_dir2,
dataset_type=fo.types.TFImageClassificationDataset,
images_dir=os.path.join(export_dir2, "images-rgb"),
force_rgb=True,
)
rgb_dataset1.compute_metadata()
self.assertEqual(rgb_dataset1.first().metadata.num_channels, 3)
export_dir3 = self._new_dir()
# Force RGB at export-time
gray_dataset1.export(
export_dir=export_dir3,
dataset_type=fo.types.TFImageClassificationDataset,
force_rgb=True,
)
# Import RGB
rgb_dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir3,
dataset_type=fo.types.TFImageClassificationDataset,
images_dir=os.path.join(export_dir3, "images"),
)
rgb_dataset2.compute_metadata()
self.assertEqual(rgb_dataset2.first().metadata.num_channels, 3)
class ImageClassificationsDatasetTests(ImageDatasetTests):
def _make_dataset(self):
samples = [
fo.Sample(
filepath=self._new_image(),
predictions=fo.Classifications(
classifications=[
fo.Classification(label="cat", confidence=0.9)
]
),
),
fo.Sample(
filepath=self._new_image(),
predictions=fo.Classifications(
classifications=[
fo.Classification(label="dog", confidence=0.95)
]
),
),
fo.Sample(filepath=self._new_image()),
]
dataset = fo.Dataset()
dataset.add_samples(samples)
return dataset
@drop_datasets
def test_fiftyone_image_classification_dataset(self):
dataset = self._make_dataset()
# Standard format
export_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.FiftyOneImageClassificationDataset,
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.FiftyOneImageClassificationDataset,
label_field="predictions",
)
self.assertEqual(len(dataset), len(dataset2))
self.assertEqual(
dataset.count("predictions.classifications"),
dataset2.count("predictions.classifications"),
)
# Include confidence
export_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.FiftyOneImageClassificationDataset,
include_confidence=True,
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.FiftyOneImageClassificationDataset,
label_field="predictions",
)
confs = dataset.values(
"predictions.classifications.confidence",
missing_value=-1,
unwind=True,
)
confs2 = dataset2.values(
"predictions.classifications.confidence",
missing_value=-1,
unwind=True,
)
self.assertEqual(len(dataset), len(dataset2))
# sorting is necessary because sample order is arbitrary
self.assertTrue(np.allclose(sorted(confs), sorted(confs2)))
class ImageDetectionDatasetTests(ImageDatasetTests):
def _make_dataset(self):
samples = [
fo.Sample(
filepath=self._new_image(),
predictions=fo.Detections(
detections=[
fo.Detection(
label="cat",
bounding_box=[0.1, 0.1, 0.4, 0.4],
),
fo.Detection(
label="dog",
bounding_box=[0.5, 0.5, 0.4, 0.4],
),
]
),
),
fo.Sample(
filepath=self._new_image(),
predictions=fo.Detections(
detections=[
fo.Detection(
label="cat",
bounding_box=[0.1, 0.1, 0.4, 0.4],
confidence=0.9,
age=51,
cute=True,
mood="surly",
),
fo.Detection(
label="dog",
bounding_box=[0.5, 0.5, 0.4, 0.4],
confidence=0.95,
age=52,
cute=False,
mood="derpy",
),
]
),
),
fo.Sample(filepath=self._new_image()),
]
dataset = fo.Dataset()
dataset.add_samples(samples)
return dataset
@drop_datasets
def test_fiftyone_image_detection_dataset(self):
dataset = self._make_dataset()
# Standard format
export_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.FiftyOneImageDetectionDataset,
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.FiftyOneImageDetectionDataset,
label_field="predictions",
)
self.assertEqual(len(dataset), len(dataset2))
self.assertEqual(
dataset.count("predictions.detections"),
dataset2.count("predictions.detections"),
)
self.assertEqual(
dataset.distinct("predictions.detections.confidence"),
dataset2.distinct("predictions.detections.confidence"),
)
self.assertEqual(
dataset.distinct("predictions.detections.age"),
dataset2.distinct("predictions.detections.age"),
)
self.assertEqual(
dataset.distinct("predictions.detections.cute"),
dataset2.distinct("predictions.detections.cute"),
)
self.assertEqual(
dataset.distinct("predictions.detections.mood"),
dataset2.distinct("predictions.detections.mood"),
)
# Labels-only
data_path = self.images_dir
labels_path = os.path.join(self._new_dir(), "labels.json")
dataset.export(
dataset_type=fo.types.FiftyOneImageDetectionDataset,
labels_path=labels_path,
)
dataset2 = fo.Dataset.from_dir(
dataset_type=fo.types.FiftyOneImageDetectionDataset,
data_path=data_path,
labels_path=labels_path,
label_field="predictions",
)
self.assertEqual(len(dataset), len(dataset2))
self.assertSetEqual(
set(dataset.values("filepath")),
set(dataset2.values("filepath")),
)
self.assertEqual(
dataset.count("predictions.detections"),
dataset2.count("predictions.detections"),
)
@drop_datasets
def test_tf_object_detection_dataset(self):
dataset = self._make_dataset()
# Standard format
export_dir = self._new_dir()
images_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.TFObjectDetectionDataset,
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.TFObjectDetectionDataset,
images_dir=images_dir,
label_field="predictions",
)
self.assertEqual(len(dataset), len(dataset2))
self.assertEqual(
dataset.count("predictions.detections"),
dataset2.count("predictions.detections"),
)
# Direct records path w/ sharding
tf_records_path = os.path.join(self._new_dir(), "tf.records")
tf_records_patt = tf_records_path + "-*-of-*"
images_dir = self._new_dir()
dataset.export(
dataset_type=fo.types.TFObjectDetectionDataset,
tf_records_path=tf_records_path,
num_shards=2,
)
dataset2 = fo.Dataset.from_dir(
dataset_type=fo.types.TFObjectDetectionDataset,
tf_records_path=tf_records_patt,
images_dir=images_dir,
label_field="predictions",
)
self.assertEqual(len(dataset), len(dataset2))
self.assertEqual(
dataset.count("predictions.detections"),
dataset2.count("predictions.detections"),
)
@drop_datasets
def test_coco_detection_dataset(self):
dataset = self._make_dataset()
# Standard format
export_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.COCODetectionDataset,
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.COCODetectionDataset,
label_field="predictions",
)
self.assertEqual(len(dataset), len(dataset2))
self.assertEqual(
dataset.count("predictions.detections"),
dataset2.count("predictions.detections"),
)
self.assertEqual(
dataset.distinct("predictions.detections.confidence"),
dataset2.distinct("predictions.detections.confidence"),
)
self.assertEqual(
dataset.distinct("predictions.detections.age"),
dataset2.distinct("predictions.detections.age"),
)
self.assertEqual(
dataset.distinct("predictions.detections.cute"),
dataset2.distinct("predictions.detections.cute"),
)
self.assertEqual(
dataset.distinct("predictions.detections.mood"),
dataset2.distinct("predictions.detections.mood"),
)
# Omit extra attributes
export_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.COCODetectionDataset,
extra_attrs=False,
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.COCODetectionDataset,
label_field="predictions",
)
self.assertEqual(dataset2.distinct("predictions.detections.age"), [])
self.assertEqual(dataset2.distinct("predictions.detections.cute"), [])
self.assertEqual(dataset2.distinct("predictions.detections.mood"), [])
# Labels-only
data_path = self.images_dir
labels_path = os.path.join(self._new_dir(), "labels.json")
dataset.export(
dataset_type=fo.types.COCODetectionDataset,
labels_path=labels_path,
)
dataset2 = fo.Dataset.from_dir(
dataset_type=fo.types.COCODetectionDataset,
data_path=data_path,
labels_path=labels_path,
label_field="predictions",
)
self.assertEqual(len(dataset), len(dataset2))
self.assertSetEqual(
set(dataset.values("filepath")),
set(dataset2.values("filepath")),
)
self.assertEqual(
dataset.count("predictions.detections"),
dataset2.count("predictions.detections"),
)
@drop_datasets
def test_voc_detection_dataset(self):
dataset = self._make_dataset()
# Standard format
export_dir = self._new_dir()
view = dataset.limit(2)
view.export(
export_dir=export_dir,
dataset_type=fo.types.VOCDetectionDataset,
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.VOCDetectionDataset,
label_field="predictions",
)
self.assertEqual(len(view), len(dataset2))
self.assertEqual(
view.count("predictions.detections"),
dataset2.count("predictions.detections"),
)
self.assertEqual(
view.distinct("predictions.detections.age"),
dataset2.distinct("predictions.detections.age"),
)
self.assertEqual(
view.distinct("predictions.detections.cute"),
dataset2.distinct("predictions.detections.cute"),
)
self.assertEqual(
view.distinct("predictions.detections.mood"),
dataset2.distinct("predictions.detections.mood"),
)
# Handle unlabeled data
export_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.VOCDetectionDataset,
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.VOCDetectionDataset,
label_field="predictions",
include_all_data=True,
)
self.assertEqual(len(dataset), len(dataset2))
# Labels-only
data_path = self.images_dir
labels_path = os.path.join(self._new_dir(), "labels.xml")
dataset.export(
dataset_type=fo.types.VOCDetectionDataset,
labels_path=labels_path,
)
dataset2 = fo.Dataset.from_dir(
dataset_type=fo.types.VOCDetectionDataset,
data_path=data_path,
labels_path=labels_path,
label_field="predictions",
include_all_data=True,
)
self.assertEqual(len(dataset), len(dataset2))
self.assertSetEqual(
set(dataset.values("filepath")),
set(dataset2.values("filepath")),
)
self.assertEqual(
dataset.count("predictions.detections"),
dataset2.count("predictions.detections"),
)
@drop_datasets
def test_kitti_detection_dataset(self):
dataset = self._make_dataset()
# Standard format
export_dir = self._new_dir()
view = dataset.limit(2)
view.export(
export_dir=export_dir,
dataset_type=fo.types.KITTIDetectionDataset,
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.KITTIDetectionDataset,
label_field="predictions",
)
self.assertEqual(len(view), len(dataset2))
self.assertEqual(
view.count("predictions.detections"),
dataset2.count("predictions.detections"),
)
self.assertEqual(
view.distinct("predictions.detections.confidence"),
dataset2.distinct("predictions.detections.confidence"),
)
# Handle unlabeled data
export_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.KITTIDetectionDataset,
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.KITTIDetectionDataset,
label_field="predictions",
include_all_data=True,
)
self.assertEqual(len(dataset), len(dataset2))
# Labels-only
data_path = self.images_dir
labels_path = os.path.join(self._new_dir(), "labels/")
dataset.export(
dataset_type=fo.types.KITTIDetectionDataset,
labels_path=labels_path,
)
dataset2 = fo.Dataset.from_dir(
dataset_type=fo.types.KITTIDetectionDataset,
data_path=data_path,
labels_path=labels_path,
label_field="predictions",
include_all_data=True,
)
self.assertEqual(len(dataset), len(dataset2))
self.assertSetEqual(
set(dataset.values("filepath")),
set(dataset2.values("filepath")),
)
self.assertEqual(
dataset.count("predictions.detections"),
dataset2.count("predictions.detections"),
)
@drop_datasets
def test_yolov4_dataset(self):
dataset = self._make_dataset()
# Standard format
export_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.YOLOv4Dataset,
label_field="predictions",
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.YOLOv4Dataset,
label_field="predictions",
include_all_data=True,
)
self.assertEqual(len(dataset), len(dataset2))
self.assertEqual(
dataset.count("predictions.detections"),
dataset2.count("predictions.detections"),
)
# Include confidence
export_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.YOLOv4Dataset,
label_field="predictions",
include_confidence=True,
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.YOLOv4Dataset,
label_field="predictions",
include_all_data=True,
)
bounds = dataset.bounds("predictions.detections.confidence")
bounds2 = dataset2.bounds("predictions.detections.confidence")
self.assertAlmostEqual(bounds[0], bounds2[0])
self.assertAlmostEqual(bounds[1], bounds2[1])
# Labels-only
data_path = os.path.dirname(dataset.first().filepath)
labels_path = os.path.join(self._new_dir(), "labels/")
dataset.export(
dataset_type=fo.types.YOLOv4Dataset,
labels_path=labels_path,
)
dataset2 = fo.Dataset.from_dir(
dataset_type=fo.types.YOLOv4Dataset,
data_path=data_path,
labels_path=labels_path,
label_field="predictions",
include_all_data=True,
)
self.assertEqual(len(dataset), len(dataset2))
self.assertEqual(
dataset.count("predictions.detections"),
dataset2.count("predictions.detections"),
)
for sample in dataset2:
self.assertTrue(os.path.isfile(sample.filepath))
@drop_datasets
def test_yolov5_dataset(self):
dataset = self._make_dataset()
# Standard format
export_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.YOLOv5Dataset,
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.YOLOv5Dataset,
label_field="predictions",
include_all_data=True,
)
self.assertEqual(len(dataset), len(dataset2))
self.assertEqual(
dataset.count("predictions.detections"),
dataset2.count("predictions.detections"),
)
# Include confidence
export_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.YOLOv5Dataset,
include_confidence=True,
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.YOLOv5Dataset,
label_field="predictions",
)
bounds = dataset.bounds("predictions.detections.confidence")
bounds2 = dataset2.bounds("predictions.detections.confidence")
self.assertAlmostEqual(bounds[0], bounds2[0])
self.assertAlmostEqual(bounds[1], bounds2[1])
@drop_datasets
def test_add_yolo_labels(self):
dataset = self._make_dataset()
classes = dataset.distinct("predictions.detections.label")
export_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.YOLOv5Dataset,
)
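# Annotation: the join below assumes the exporter wrote its labels under a
# "val" split, which is what the YOLOv5 directory layout used here implies.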
yolo_labels_path = os.path.join(export_dir, "labels", "val")
# Standard
fouy.add_yolo_labels(
dataset, "yolo", labels_path=yolo_labels_path, classes=classes
)
self.assertEqual(
dataset.count_values("predictions.detections.label"),
dataset.count_values("yolo.detections.label"),
)
self.assertEqual(1, len(dataset) - len(dataset.exists("yolo")))
# Include missing
fouy.add_yolo_labels(
dataset,
"yolo_inclusive",
labels_path=yolo_labels_path,
classes=classes,
include_missing=True,
)
self.assertEqual(
dataset.count_values("predictions.detections.label"),
dataset.count_values("yolo_inclusive.detections.label"),
)
self.assertEqual(len(dataset), len(dataset.exists("yolo_inclusive")))
@skipwindows
@drop_datasets
def test_add_coco_labels(self):
dataset = self._make_dataset()
classes = dataset.distinct("predictions.detections.label")
export_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.COCODetectionDataset,
)
coco_labels_path = os.path.join(export_dir, "labels.json")
fouc.add_coco_labels(
dataset, "coco", coco_labels_path, classes=classes
)
self.assertEqual(
dataset.count_values("predictions.detections.label"),
dataset.count_values("coco.detections.label"),
)
self.assertEqual(len(dataset), len(dataset.exists("coco")))
class ImageSegmentationDatasetTests(ImageDatasetTests):
def _make_dataset(self):
mask1 = np.zeros((128, 128), dtype=np.uint8)
mask1[32:96, 32:96] = 255
mask2 = 255 * np.ones((128, 128), dtype=np.uint8)
mask2[32:96, 32:96] = 0
instance1 = np.zeros((32, 32), dtype=bool)
instance1[8:24, 8:24] = True
instance2 = np.ones((32, 32), dtype=bool)
instance2[8:24, 8:24] = False
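# Annotation: mask2 and instance2 are the pixelwise complements of mask1 and
# instance1, giving the two samples inverted segmentations.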
samples = [
fo.Sample(
filepath=self._new_image(),
segmentations=fo.Segmentation(mask=mask1),
detections=fo.Detections(
detections=[
fo.Detection(
label="cat",
bounding_box=[0.1, 0.1, 0.4, 0.4],
mask=instance1,
),
fo.Detection(
label="dog",
bounding_box=[0.5, 0.5, 0.4, 0.4],
mask=instance2,
),
]
),
polylines=fo.Polylines(
polylines=[
fo.Polyline(
label="cat",
points=[
[
(0.1, 0.1),
(0.5, 0.1),
(0.5, 0.5),
(0.1, 0.5),
]
],
filled=True,
),
fo.Polyline(
label="dog",
points=[
[
(0.5, 0.5),
(0.9, 0.5),
(0.9, 0.9),
(0.5, 0.9),
]
],
filled=True,
),
]
),
),
fo.Sample(
filepath=self._new_image(),
segmentations=fo.Segmentation(mask=mask2),
detections=fo.Detections(
detections=[
fo.Detection(
label="cat",
bounding_box=[0.1, 0.1, 0.4, 0.4],
mask=instance2,
),
fo.Detection(
label="dog",
bounding_box=[0.5, 0.5, 0.4, 0.4],
mask=instance1,
),
]
),
polylines=fo.Polylines(
polylines=[
fo.Polyline(
label="cat",
points=[
[
(0.1, 0.1),
(0.5, 0.1),
(0.5, 0.5),
(0.1, 0.5),
]
],
filled=True,
),
fo.Polyline(
label="dog",
points=[
[
(0.5, 0.5),
(0.9, 0.5),
(0.9, 0.9),
(0.5, 0.9),
]
],
filled=True,
),
]
),
),
fo.Sample(filepath=self._new_image()),
]
dataset = fo.Dataset()
dataset.add_samples(samples)
return dataset
@drop_datasets
def test_image_segmentation_directory(self):
dataset = self._make_dataset()
# Segmentations
export_dir = self._new_dir()
view = dataset.limit(2)
view.export(
export_dir=export_dir,
dataset_type=fo.types.ImageSegmentationDirectory,
label_field="segmentations",
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.ImageSegmentationDirectory,
label_field="segmentations",
)
self.assertEqual(len(view), len(dataset2))
self.assertEqual(
view.count("segmentations.mask"),
dataset2.count("segmentations.mask"),
)
# Detections
export_dir = self._new_dir()
view = dataset.limit(2)
view.export(
export_dir=export_dir,
dataset_type=fo.types.ImageSegmentationDirectory,
label_field="detections",
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.ImageSegmentationDirectory,
label_field="segmentations",
)
self.assertEqual(len(view), len(dataset2))
self.assertEqual(
len(view.exists("detections")),
len(dataset2.exists("segmentations")),
)
# Polylines
export_dir = self._new_dir()
view = dataset.limit(2)
view.export(
export_dir=export_dir,
dataset_type=fo.types.ImageSegmentationDirectory,
label_field="polylines",
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.ImageSegmentationDirectory,
label_field="segmentations",
)
self.assertEqual(len(view), len(dataset2))
self.assertEqual(
len(view.exists("polylines")),
len(dataset2.exists("segmentations")),
)
# Labels-only
data_path = self.images_dir
labels_path = os.path.join(self._new_dir(), "labels/")
dataset.export(
dataset_type=fo.types.ImageSegmentationDirectory,
labels_path=labels_path,
label_field="segmentations",
)
dataset2 = fo.Dataset.from_dir(
dataset_type=fo.types.ImageSegmentationDirectory,
data_path=data_path,
labels_path=labels_path,
label_field="segmentations",
include_all_data=True,
)
self.assertEqual(len(dataset), len(dataset2))
self.assertSetEqual(
set(dataset.values("filepath")),
set(dataset2.values("filepath")),
)
self.assertEqual(
dataset.count("segmentations.mask"),
dataset2.count("segmentations.mask"),
)
class DICOMDatasetTests(ImageDatasetTests):
def _get_dcm_path(self):
import pydicom # pylint: disable=unused-import
from pydicom.data import get_testdata_file
return get_testdata_file("MR_small.dcm")
@drop_datasets
def test_dicom_dataset(self):
dataset_dir = self._new_dir()
images_dir = self._new_dir()
ref_path = self._get_dcm_path()
dicom_path = os.path.join(dataset_dir, "test.dcm")
etau.copy_file(ref_path, dicom_path)
# Standard format
dataset = fo.Dataset.from_dir(
dataset_dir=dataset_dir,
images_dir=images_dir,
dataset_type=fo.types.DICOMDataset,
)
self.assertEqual(len(dataset), 1)
self.assertIn("PatientName", dataset.get_field_schema())
# Direct path, specific keywords
dataset2 = fo.Dataset.from_dir(
dicom_path=dicom_path,
images_dir=images_dir,
dataset_type=fo.types.DICOMDataset,
keywords=["PatientName"],
)
self.assertEqual(len(dataset2), 1)
self.assertIn("PatientName", dataset2.get_field_schema())
class GeoLocationDatasetTests(ImageDatasetTests):
def _make_dataset(self):
samples = [
fo.Sample(
filepath=self._new_image(),
coordinates=fo.GeoLocation(
point=[-73.77615468583421, 40.76392586346787],
),
weather=fo.Classification(label="sunny"),
),
fo.Sample(
filepath=self._new_image(),
coordinates=fo.GeoLocation(
point=[-74.00767702771716, 40.72345200411182],
),
weather=fo.Classification(label="cloudy"),
),
# @todo test with missing data; this currently may fail since
# `add_samples()` does not gracefully handle expanding the schema
# for None-valued fields
# fo.Sample(filepath=self._new_image()),
]
dataset = fo.Dataset()
dataset.add_samples(samples)
return dataset
@drop_datasets
def test_geojson_dataset(self):
dataset = self._make_dataset()
# Standard format
export_dir = self._new_dir()
def maker(label):
return label.label if label is not None else None
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.GeoJSONDataset,
property_makers={"weather": maker},
)
def parser(value):
return (
fo.Classification(label=value) if value is not None else None
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.GeoJSONDataset,
location_field="coordinates",
property_parsers={"weather": parser},
)
self.assertEqual(len(dataset), len(dataset2))
self.assertEqual(
dataset.count("coordinates"), dataset2.count("coordinates")
)
self.assertEqual(dataset.count("weather"), dataset2.count("weather"))
# Labels-only
data_path = self.images_dir
labels_path = os.path.join(self._new_dir(), "labels.json")
dataset.export(
labels_path=labels_path,
dataset_type=fo.types.GeoJSONDataset,
)
dataset2 = fo.Dataset.from_dir(
data_path=data_path,
labels_path=labels_path,
dataset_type=fo.types.GeoJSONDataset,
location_field="coordinates",
)
self.assertEqual(len(dataset), len(dataset2))
self.assertSetEqual(
set(dataset.values("filepath")),
set(dataset2.values("filepath")),
)
self.assertEqual(
dataset.count("coordinates"), dataset2.count("coordinates")
)
class MultitaskImageDatasetTests(ImageDatasetTests):
def _make_dataset(self):
samples = [
fo.Sample(
filepath=self._new_image(),
weather=fo.Classification(label="sunny", confidence=0.9),
predictions=fo.Detections(
detections=[
fo.Detection(
label="cat",
bounding_box=[0.1, 0.1, 0.4, 0.4],
),
fo.Detection(
label="dog",
bounding_box=[0.5, 0.5, 0.4, 0.4],
),
]
),
),
fo.Sample(
filepath=self._new_image(),
weather=fo.Classification(label="cloudy", confidence=0.95),
predictions=fo.Detections(
detections=[
fo.Detection(
label="cat",
bounding_box=[0.1, 0.1, 0.4, 0.4],
confidence=0.9,
age=51,
cute=True,
mood="surly",
),
fo.Detection(
label="dog",
bounding_box=[0.5, 0.5, 0.4, 0.4],
confidence=0.95,
age=52,
cute=False,
mood="derpy",
),
]
),
),
fo.Sample(filepath=self._new_image()),
]
dataset = fo.Dataset()
dataset.add_samples(samples)
return dataset
@drop_datasets
def test_fiftyone_image_labels_dataset(self):
dataset = self._make_dataset()
# Standard format
export_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.FiftyOneImageLabelsDataset,
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.FiftyOneImageLabelsDataset,
)
self.assertEqual(len(dataset), len(dataset2))
self.assertEqual(
dataset.count("weather"),
dataset2.count("attributes"),
)
self.assertEqual(
dataset.distinct("weather.confidence"),
dataset2.distinct("attributes.confidence"),
)
self.assertEqual(
dataset.count("predictions.detections"),
dataset2.count("detections.detections"),
)
self.assertEqual(
dataset.distinct("predictions.detections.confidence"),
dataset2.distinct("detections.detections.confidence"),
)
@drop_datasets
def test_bdd_dataset(self):
dataset = self._make_dataset()
# Standard format
export_dir = self._new_dir()
view = dataset.limit(2)
view.export(
export_dir=export_dir,
dataset_type=fo.types.BDDDataset,
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.BDDDataset,
)
self.assertEqual(len(view), len(dataset2))
self.assertEqual(
view.count("weather"),
dataset2.count("attributes"),
)
self.assertEqual(
view.count("predictions.detections"),
dataset2.count("detections.detections"),
)
# Handle unlabeled data
export_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.BDDDataset,
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.BDDDataset,
include_all_data=True,
)
self.assertEqual(len(dataset), len(dataset2))
# Labels-only
data_path = self.images_dir
labels_path = os.path.join(self._new_dir(), "labels.json")
dataset.export(
labels_path=labels_path,
dataset_type=fo.types.BDDDataset,
)
dataset2 = fo.Dataset.from_dir(
data_path=data_path,
labels_path=labels_path,
dataset_type=fo.types.BDDDataset,
include_all_data=True,
)
self.assertEqual(len(dataset), len(dataset2))
self.assertEqual(
dataset.count("weather"),
dataset2.count("attributes"),
)
self.assertEqual(
dataset.count("predictions.detections"),
dataset2.count("detections.detections"),
)
@drop_datasets
def test_cvat_image_dataset(self):
dataset = self._make_dataset()
# Standard format
export_dir = self._new_dir()
view = dataset.limit(2)
view.export(
export_dir=export_dir,
dataset_type=fo.types.CVATImageDataset,
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.CVATImageDataset,
)
self.assertEqual(len(view), len(dataset2))
self.assertEqual(
view.count("predictions.detections"),
dataset2.count("detections.detections"),
)
# Handle unlabeled data
export_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.CVATImageDataset,
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.CVATImageDataset,
include_all_data=True,
)
self.assertEqual(len(dataset), len(dataset2))
# Labels-only
data_path = self.images_dir
labels_path = os.path.join(self._new_dir(), "labels.xml")
dataset.export(
labels_path=labels_path,
dataset_type=fo.types.CVATImageDataset,
)
dataset2 = fo.Dataset.from_dir(
data_path=data_path,
labels_path=labels_path,
dataset_type=fo.types.CVATImageDataset,
include_all_data=True,
)
self.assertEqual(len(dataset), len(dataset2))
self.assertEqual(
dataset.count("predictions.detections"),
dataset2.count("detections.detections"),
)
@skipwindows
@drop_datasets
def test_fiftyone_dataset(self):
dataset = self._make_dataset()
# Standard format
export_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.FiftyOneDataset,
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.FiftyOneDataset,
)
self.assertEqual(len(dataset), len(dataset2))
self.assertListEqual(
[os.path.basename(f) for f in dataset.values("filepath")],
[os.path.basename(f) for f in dataset2.values("filepath")],
)
self.assertListEqual(
dataset.values("weather.label"), dataset2.values("weather.label")
)
self.assertEqual(
dataset.count("predictions.detections"),
dataset2.count("predictions.detections"),
)
# Test import/export of run results
dataset.clone_sample_field("predictions", "ground_truth")
view = dataset.limit(2)
view.evaluate_detections(
"predictions", gt_field="ground_truth", eval_key="test"
)
export_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.FiftyOneDataset,
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.FiftyOneDataset,
)
self.assertTrue("test" in dataset.list_evaluations())
self.assertTrue("test" in dataset2.list_evaluations())
view2 = dataset2.load_evaluation_view("test")
self.assertEqual(len(view), len(view2))
info = dataset.get_evaluation_info("test")
info2 = dataset2.get_evaluation_info("test")
self.assertEqual(info.key, info2.key)
class OpenLABELImageDatasetTests(ImageDatasetTests):
@drop_datasets
def test_openlabel_dataset(self):
import utils.openlabel as ol
labels_path = ol._make_image_labels(self._tmp_dir)
img_filepath = self._new_image(name="openlabel_test")
dataset = fo.Dataset.from_dir(
data_path=self.images_dir,
labels_path=labels_path,
dataset_type=fo.types.OpenLABELImageDataset,
)
self.assertEqual(dataset.count("detections.detections.label"), 1)
self.assertEqual(dataset.count("segmentations.detections.label"), 2)
self.assertEqual(dataset.count("keypoints.keypoints.label"), 1)
@drop_datasets
def test_openlabel_single_type_dataset(self):
import utils.openlabel as ol
labels_path = ol._make_image_labels(self._tmp_dir)
img_filepath = self._new_image(name="openlabel_test")
dataset = fo.Dataset.from_dir(
data_path=self.images_dir,
labels_path=labels_path,
dataset_type=fo.types.OpenLABELImageDataset,
label_types="detections",
)
self.assertTrue(
isinstance(dataset.first().ground_truth, fo.Detections)
)
@drop_datasets
def test_openlabel_segmentation_dataset(self):
import utils.openlabel as ol
labels_path = ol._make_segmentation_labels(self._tmp_dir)
img_filepath = self._new_image(name="openlabel_test")
dataset = fo.Dataset.from_dir(
data_path=self.images_dir,
labels_path=labels_path,
dataset_type=fo.types.OpenLABELImageDataset,
)
self.assertEqual(dataset.count("segmentations.detections.mask"), 2)
dataset = fo.Dataset.from_dir(
data_path=self.images_dir,
labels_path=labels_path,
dataset_type=fo.types.OpenLABELImageDataset,
use_polylines=True,
)
self.assertEqual(dataset.count("segmentations.polylines"), 2)
class VideoDatasetTests(unittest.TestCase):
def setUp(self):
temp_dir = etau.TempDir()
tmp_dir = temp_dir.__enter__()
ref_video_path = os.path.join(tmp_dir, "_ref_video.mp4")
videos_dir = os.path.join(tmp_dir, "_videos")
with etav.FFmpegVideoWriter(ref_video_path, 5, (640, 480)) as writer:
for _ in range(5):
img = np.random.randint(
255, size=(480, 640, 3), dtype=np.uint8
)
writer.write(img)
self._temp_dir = temp_dir
self._tmp_dir = tmp_dir
self._ref_video_path = ref_video_path
self.videos_dir = videos_dir
def tearDown(self):
self._temp_dir.__exit__()
def _new_video(self, filename=None):
if filename is None:
filename = self._new_name()
filepath = os.path.join(
self.videos_dir,
filename + os.path.splitext(self._ref_video_path)[1],
)
etau.copy_file(self._ref_video_path, filepath)
return filepath
def _new_name(self):
return "".join(
random.choice(string.ascii_lowercase + string.digits)
for _ in range(24)
)
def _new_dir(self):
return os.path.join(self._tmp_dir, self._new_name())
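# The VideoDatasetTests subclasses below all exercise the same round-trip
# pattern: build a dataset, export it, re-import it, and compare counts. A
# minimal standalone sketch of that pattern (illustrative only; make_dataset
# stands in for any of the _make_dataset helpers defined below):
#
#   dataset = make_dataset()
#   export_dir = "/tmp/export"  # hypothetical path
#   dataset.export(export_dir=export_dir, dataset_type=fo.types.VideoDirectory)
#   dataset2 = fo.Dataset.from_dir(
#       dataset_dir=export_dir, dataset_type=fo.types.VideoDirectory
#   )
#   assert len(dataset) == len(dataset2)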
class OpenLABELVideoDatasetTests(VideoDatasetTests):
@drop_datasets
def test_openlabel_dataset(self):
import utils.openlabel as ol
labels_path = ol._make_video_labels(self._tmp_dir)
vid_filepath = self._new_video(filename="openlabel_test")
dataset = fo.Dataset.from_dir(
data_path=self.videos_dir,
labels_path=labels_path,
dataset_type=fo.types.OpenLABELVideoDataset,
)
self.assertEqual(
dataset.count("frames.detections.detections.label"), 5
)
self.assertEqual(
dataset.count("frames.segmentations.detections.label"), 5
)
self.assertEqual(dataset.count("frames.keypoints.keypoints.label"), 5)
class VideoExportCoersionTests(VideoDatasetTests):
@skipwindows
@drop_datasets
def test_clip_exports(self):
sample1 = fo.Sample(
filepath=self._new_video(),
predictions=fo.TemporalDetections(
detections=[
fo.TemporalDetection(
label="cat", support=[1, 3], confidence=0.9
)
]
),
)
sample1.frames[1] = fo.Frame(
weather=fo.Classification(label="sunny", confidence=0.9),
predictions=fo.Detections(
detections=[
fo.Detection(
label="cat",
bounding_box=[0.1, 0.1, 0.4, 0.4],
),
fo.Detection(
label="dog",
bounding_box=[0.5, 0.5, 0.4, 0.4],
),
]
),
)
sample1.frames[2] = fo.Frame(
weather=fo.Classification(label="cloudy", confidence=0.95),
predictions=fo.Detections(
detections=[
fo.Detection(
label="cat",
bounding_box=[0.1, 0.1, 0.4, 0.4],
confidence=0.9,
age=51,
cute=True,
mood="surly",
),
fo.Detection(
label="dog",
bounding_box=[0.5, 0.5, 0.4, 0.4],
confidence=0.95,
age=52,
cute=False,
mood="derpy",
),
]
),
)
sample2 = fo.Sample(
filepath=self._new_video(),
predictions=fo.TemporalDetections(
detections=[
fo.TemporalDetection(
label="cat",
support=[1, 4],
confidence=0.95,
),
fo.TemporalDetection(
label="dog",
support=[2, 5],
confidence=0.95,
),
]
),
)
dataset = fo.Dataset()
dataset.add_samples([sample1, sample2])
#
# Export unlabeled video clips
#
export_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.VideoDirectory,
label_field="predictions",
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.VideoDirectory,
)
self.assertEqual(
len(dataset2), dataset.count("predictions.detections")
)
#
# Export temporal detection clips in a TemporalDetections field
#
export_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.VideoClassificationDirectoryTree,
label_field="predictions",
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.VideoClassificationDirectoryTree,
)
self.assertEqual(
len(dataset2), dataset.count("predictions.detections")
)
#
# Export video classification clips directly from a ClipsView
#
export_dir = self._new_dir()
dataset.to_clips("predictions").export(
export_dir=export_dir,
dataset_type=fo.types.VideoClassificationDirectoryTree,
label_field="predictions",
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.VideoClassificationDirectoryTree,
)
self.assertEqual(
len(dataset2), dataset.count("predictions.detections")
)
#
# Export frame labels for clips
#
export_dir = self._new_dir()
clips = dataset.to_clips("predictions")
clips.export(
export_dir=export_dir,
dataset_type=fo.types.FiftyOneVideoLabelsDataset,
frame_labels_field="predictions",
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.FiftyOneVideoLabelsDataset,
)
self.assertEqual(
clips.count("frames.predictions.detections"),
dataset2.count("frames.predictions.detections"),
)
#
# Export entire clips view as a dataset
#
export_dir = self._new_dir()
clips = dataset.to_clips("predictions")
clips.export(
export_dir=export_dir, dataset_type=fo.types.FiftyOneDataset
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir, dataset_type=fo.types.FiftyOneDataset
)
self.assertEqual(len(clips), len(dataset2))
self.assertEqual(clips.count("frames"), dataset2.count("frames"))
self.assertListEqual(
clips.values("support"), dataset2.values("support")
)
dataset3 = clips.clone()
self.assertEqual(len(clips), len(dataset3))
self.assertEqual(clips.count("frames"), dataset3.count("frames"))
self.assertListEqual(
clips.values("support"), dataset3.values("support")
)
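# A ClipsView created via to_clips("predictions") contains one sample per
# TemporalDetection, which is what the length assertions above rely on.
# Equivalently (a sketch using only calls already made in this test):
#
#   clips = dataset.to_clips("predictions")
#   assert len(clips) == dataset.count("predictions.detections")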
class UnlabeledVideoDatasetTests(VideoDatasetTests):
def _make_dataset(self):
samples = [fo.Sample(filepath=self._new_video()) for _ in range(5)]
dataset = fo.Dataset()
dataset.add_samples(samples)
return dataset
@drop_datasets
def test_video_directory(self):
dataset = self._make_dataset()
export_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.VideoDirectory,
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.VideoDirectory,
)
self.assertEqual(len(dataset), len(dataset2))
class VideoClassificationDatasetTests(VideoDatasetTests):
def _make_dataset(self):
samples = [
fo.Sample(
filepath=self._new_video(),
predictions=fo.Classification(label="cat", confidence=0.9),
),
fo.Sample(
filepath=self._new_video(),
predictions=fo.Classification(label="dog", confidence=0.95),
),
fo.Sample(filepath=self._new_video()),
]
dataset = fo.Dataset()
dataset.add_samples(samples)
return dataset
@drop_datasets
def test_video_classification_directory_tree(self):
dataset = self._make_dataset()
# Standard format
export_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.VideoClassificationDirectoryTree,
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.VideoClassificationDirectoryTree,
label_field="predictions",
)
self.assertEqual(len(dataset), len(dataset2))
self.assertEqual(
dataset.count("predictions"), dataset2.count("predictions")
)
class TemporalDetectionDatasetTests(VideoDatasetTests):
def _make_dataset(self):
samples = [
fo.Sample(
filepath=self._new_video(),
predictions=fo.TemporalDetections(
detections=[
fo.TemporalDetection(
label="cat", support=[1, 3], confidence=0.9
)
]
),
),
fo.Sample(
filepath=self._new_video(),
predictions=fo.TemporalDetections(
detections=[
fo.TemporalDetection(
label="cat",
support=[1, 4],
confidence=0.95,
),
fo.TemporalDetection(
label="dog",
support=[2, 5],
confidence=0.95,
),
]
),
),
fo.Sample(filepath=self._new_video()),
]
dataset = fo.Dataset()
dataset.add_samples(samples)
return dataset
@drop_datasets
def test_fiftyone_temporal_detection_dataset(self):
dataset = self._make_dataset()
# Standard format
export_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.FiftyOneTemporalDetectionDataset,
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.FiftyOneTemporalDetectionDataset,
label_field="predictions",
)
supports = dataset.values("predictions.detections.support")
supports2 = dataset2.values("predictions.detections.support")
self.assertEqual(len(dataset), len(dataset2))
# sorting is necessary because sample order is arbitrary
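        # the key (k is None, k) sorts non-None supports first (False < True)
        # and orders the [first, last] frame lists among themselves, while
        # grouping any None supports at the end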
self.assertListEqual(
sorted(supports, key=lambda k: (k is None, k)),
sorted(supports2, key=lambda k: (k is None, k)),
)
# Use timestamps
export_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.FiftyOneTemporalDetectionDataset,
use_timestamps=True,
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.FiftyOneTemporalDetectionDataset,
label_field="predictions",
)
supports = dataset.values("predictions.detections.support")
supports2 = dataset2.values("predictions.detections.support")
self.assertEqual(len(dataset), len(dataset2))
# sorting is necessary because sample order is arbitrary
self.assertListEqual(
sorted(supports, key=lambda k: (k is None, k)),
sorted(supports2, key=lambda k: (k is None, k)),
)
class MultitaskVideoDatasetTests(VideoDatasetTests):
def _make_dataset(self):
sample1 = fo.Sample(filepath=self._new_video())
sample1.frames[1] = fo.Frame(
weather=fo.Classification(label="sunny", confidence=0.9),
predictions=fo.Detections(
detections=[
fo.Detection(
label="cat",
bounding_box=[0.1, 0.1, 0.4, 0.4],
),
fo.Detection(
label="dog",
bounding_box=[0.5, 0.5, 0.4, 0.4],
),
]
),
)
sample1.frames[2] = fo.Frame(
weather=fo.Classification(label="cloudy", confidence=0.95),
predictions=fo.Detections(
detections=[
fo.Detection(
label="cat",
bounding_box=[0.1, 0.1, 0.4, 0.4],
confidence=0.9,
age=51,
cute=True,
mood="surly",
),
fo.Detection(
label="dog",
bounding_box=[0.5, 0.5, 0.4, 0.4],
confidence=0.95,
age=52,
cute=False,
mood="derpy",
),
]
),
)
sample2 = fo.Sample(filepath=self._new_video())
sample2.frames[1] = fo.Frame()
sample3 = fo.Sample(filepath=self._new_video())
dataset = fo.Dataset()
dataset.add_samples([sample1, sample2, sample3])
return dataset
@drop_datasets
def test_fiftyone_video_labels_dataset(self):
dataset = self._make_dataset()
# Standard format
export_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.FiftyOneVideoLabelsDataset,
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.FiftyOneVideoLabelsDataset,
)
self.assertEqual(len(dataset), len(dataset2))
self.assertEqual(
dataset.count("frames.weather"),
dataset2.count("frames.attributes"),
)
self.assertEqual(
dataset.distinct("frames.weather.confidence"),
dataset2.distinct("frames.attributes.confidence"),
)
self.assertEqual(
dataset.count("frames.predictions.detections"),
dataset2.count("frames.detections.detections"),
)
self.assertEqual(
dataset.distinct("frames.predictions.detections.confidence"),
dataset2.distinct("frames.detections.detections.confidence"),
)
@drop_datasets
def test_cvat_video_dataset(self):
dataset = self._make_dataset()
# Standard format
export_dir = self._new_dir()
view = dataset.limit(1)
view.export(
export_dir=export_dir,
dataset_type=fo.types.CVATVideoDataset,
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.CVATVideoDataset,
)
self.assertEqual(len(view), len(dataset2))
self.assertEqual(
view.count("frames.predictions.detections"),
dataset2.count("frames.detections.detections"),
)
# Handle unlabeled data
export_dir = self._new_dir()
dataset.export(
export_dir=export_dir,
dataset_type=fo.types.CVATVideoDataset,
)
dataset2 = fo.Dataset.from_dir(
dataset_dir=export_dir,
dataset_type=fo.types.CVATVideoDataset,
include_all_data=True,
)
self.assertEqual(len(dataset), len(dataset2))
# Labels-only
data_path = self.videos_dir
labels_path = os.path.join(self._new_dir(), "labels/")
dataset.export(
labels_path=labels_path,
dataset_type=fo.types.CVATVideoDataset,
)
dataset2 = fo.Dataset.from_dir(
data_path=data_path,
labels_path=labels_path,
dataset_type=fo.types.CVATVideoDataset,
include_all_data=True,
)
self.assertEqual(len(dataset), len(dataset2))
self.assertEqual(
dataset.count("frames.predictions.detections"),
dataset2.count("frames.detections.detections"),
)
if __name__ == "__main__":
fo.config.show_progress_bars = False
unittest.main(verbosity=2)
Multi_optim/Generate_SMILES.py | CHUJianchun/VAE_MLP_PSO | Python | 2,240 bytes | MIT | hexsha 9c67d611bc1b28666f80b1ea5f2cde01d9830a53
import Multi_optim.Methods as Methods
import numpy as np
import torch
from torch.autograd import Variable
import Multi_optim.Transform as Transform
import importlib
from tqdm import tqdm
def molecular_generation(params, mlp_file_name, property_name, vae_file_name):
importlib.reload(Methods)
importlib.reload(Transform)
optim_result, optim_latent_space_code = Methods.gradient_descent_optimize(params, mlp_file_name, property_name)
optim_latent_space_code = optim_latent_space_code[:, :, 2:]
optim_latent_space_code = Variable(torch.tensor(optim_latent_space_code)).cuda()
optim_smiles = np.zeros((optim_latent_space_code.shape[0], optim_latent_space_code.shape[1], 2)).tolist()
point_index = 0
for point in optim_latent_space_code:
optim_step_code_index = 0
for optim_step_code in point:
anion_smiles, cation_smiles = Transform.latent_code_to_smiles(optim_step_code, vae_file_name)
optim_smiles[point_index][optim_step_code_index][0] = anion_smiles
optim_smiles[point_index][optim_step_code_index][1] = cation_smiles
optim_step_code_index += 1
point_index += 1
return optim_latent_space_code.cpu().detach().numpy(), optim_smiles, optim_result
def molecular_generation_cat(params, mlp_file_name, vae_file_name):
importlib.reload(Methods)
importlib.reload(Transform)
optim_result, optim_latent_space_code = Methods.gradient_descent_optimize_cat(params, mlp_file_name)
optim_latent_space_code = optim_latent_space_code[:, :, 2:]
optim_latent_space_code = Variable(torch.tensor(optim_latent_space_code)).cuda()
optim_smiles = np.zeros((optim_latent_space_code.shape[0], optim_latent_space_code.shape[1])).tolist()
point_index = 0
for point in tqdm(optim_latent_space_code):
optim_step_code_index = 0
for optim_step_code in point:
smiles = Transform.latent_code_to_smiles_cat(optim_step_code, vae_file_name)
optim_smiles[point_index][optim_step_code_index] = smiles
optim_step_code_index += 1
point_index += 1
return optim_latent_space_code.cpu().detach().numpy(), optim_smiles, optim_result
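# A minimal refactor sketch of the decode loops above (assumption: same
# Transform.latent_code_to_smiles API); nested list comprehensions replace
# the manual point_index/optim_step_code_index counters:
def _decode_latent_grid_sketch(latent_codes, vae_file_name):
    # Maps a (num_points, num_steps, dim) latent tensor to one
    # (anion_smiles, cation_smiles) pair per optimization step.
    return [
        [
            Transform.latent_code_to_smiles(step_code, vae_file_name)
            for step_code in point
        ]
        for point in latent_codes
    ]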
tests/v2_validation/cattlevalidationtest/core/test_rancher_compose_commands.py | bmdepesa/validation-tests | Python | 21,032 bytes | Apache-2.0 | hexsha 9c707aa513c6ad5afdea0f61d48a8fc594514c29
from common_fixtures import * # NOQA
RCCOMMANDS_SUBDIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'resources/rccmds')
logger = logging.getLogger(__name__)
start_project_str = "Starting"
reason_skipped_str = 'Rancher compose files directory location not ' \
                     'set/does not exist, or account api keys provided'
if_compose_data_files = pytest.mark.skipif(
not os.path.isdir(RCCOMMANDS_SUBDIR) or
ACCESS_KEY is not None or SECRET_KEY is not None,
reason=reason_skipped_str)
@if_compose_data_files
def test_rancher_compose_create_service(client,
rancher_compose_container):
# This method tests the rancher compose create and up commands
env_name = random_str().replace("-", "")
# Create an environment using up
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
"create", "Creating stack", "rc1.yml")
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
"up -d", start_project_str, "rc1.yml")
env, service = get_env_service_by_name(client, env_name, "test1")
# Confirm service is active and the containers are running
assert service.state == "active"
assert service.scale == 3
assert service.name == "test1"
check_config_for_service(client, service, {"test1": "value1"}, 1)
container_list = get_service_container_list(client, service)
assert len(container_list) == 3
for container in container_list:
assert container.state == "running"
delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_start_stop(client,
rancher_compose_container):
# This method tests the rancher compose start and stop commands
# Bug #4887 has been filed
    # Bug #4933 has been filed [Start command has no response; the "Started"
    # response is checked for now and should be changed if required]
env_name = random_str().replace("-", "")
# Create an environment using up
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
"up -d", start_project_str, "rc1.yml")
env, service = get_env_service_by_name(client, env_name, "test1")
# Confirm service is active and the containers are running
assert service.state == "active"
check_config_for_service(client, service, {"test1": "value1"}, 1)
container_list = get_service_container_list(client, service)
assert len(container_list) == 3
for container in container_list:
assert container.state == "running"
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
"stop", "Stopped", rancher_compose="rc1.yml")
    # Note: We add a sleep as the stop command does not wait until it completes
time.sleep(10)
service = client.wait_success(service)
# Confirm service is inactive and the containers are stopped
assert service.state == "inactive"
container_list = get_service_container_list(client, service)
assert len(container_list) == 3
# Check for containers being stopped
for container in container_list:
assert container.state == "stopped"
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
"start -d", "Started", "rc1.yml")
# Confirm service is active and the containers are running
service = client.wait_success(service, 300)
assert service.state == "active"
check_config_for_service(client, service, {"test1": "value1"}, 1)
container_list = get_service_container_list(client, service)
assert len(container_list) == 3
for container in container_list:
assert container.state == "running"
delete_all(client, [env])
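# A polling sketch (hypothetical helper, not part of this suite) that could
# replace the fixed time.sleep(10) used after stop/down above, assuming the
# gdapi-style client.reload() available on the cattle client:
def _wait_for_service_state(client, service, state, timeout=60, interval=2):
    # Re-read the service until it reaches the desired state or we time out.
    deadline = time.time() + timeout
    while time.time() < deadline:
        service = client.reload(service)
        if service.state == state:
            return service
        time.sleep(interval)
    raise AssertionError(
        "Service %s did not reach state '%s' within %ss"
        % (service.name, state, timeout)
    )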
@if_compose_data_files
def test_rancher_compose_start_down(client,
rancher_compose_container):
# This method tests the rancher compose start and down commands
env_name = random_str().replace("-", "")
    # Bug #4933 has been filed [Start command has no response; the "Started"
    # response is checked for now and should be changed if required]
# Create an environment using up
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
"up -d", start_project_str, "rc1.yml")
env, service = get_env_service_by_name(client, env_name, "test1")
# Confirm service is active and the containers are running
assert service.state == "active"
check_config_for_service(client, service, {"test1": "value1"}, 1)
container_list = get_service_container_list(client, service)
assert len(container_list) == 3
for container in container_list:
assert container.state == "running"
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
"down", "Stopped", "rc1.yml")
# Note: We add a sleep as the down command does not wait until it completes
time.sleep(10)
service = client.wait_success(service)
# Confirm service is inactive and the containers are stopped
assert service.state == "inactive"
container_list = get_service_container_list(client, service)
assert len(container_list) == 3
# Check for containers being stopped
for container in container_list:
assert container.state == "stopped"
launch_rancher_compose_from_file(client, RCCOMMANDS_SUBDIR,
"dc1.yml", env_name,
"start -d", "Started", "rc1.yml")
# Confirm service is active and the containers are running
service = client.wait_success(service, 300)
assert service.state == "active"
check_config_for_service(client, service, {"test1": "value1"}, 1)
container_list = get_service_container_list(client, service)
assert len(container_list) == 3
for container in container_list:
assert container.state == "running"
delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_service_restart(client,
rancher_compose_container):
# This method tests the rancher compose restart command
env_name = random_str().replace("-", "")
# Create an environment using up
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc2.yml", env_name,
"up -d", "Creating stack", "rc2.yml")
env, service1 = get_env_service_by_name(client, env_name, "test1")
env, service2 = get_env_service_by_name(client, env_name, "test2")
# Confirm service is active and the containers are running
service1 = client.wait_success(service1, 300)
service2 = client.wait_success(service2, 300)
assert service1.state == "active"
assert service2.state == "active"
check_config_for_service(client, service1, {"test1": "value1"}, 1)
check_config_for_service(client, service2, {"test2": "value2"}, 1)
container_list1 = get_service_container_list(client, service1)
assert len(container_list1) == 4
for container in container_list1:
assert container.state == "running"
assert container.startCount == 1
container_list2 = get_service_container_list(client, service2)
assert len(container_list2) == 4
    for container in container_list2:
        assert container.state == "running"
        assert container.startCount == 1
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc2.yml", env_name,
"restart", "Restarting", "rc2.yml")
env, service1 = get_env_service_by_name(client, env_name, "test1")
env, service2 = get_env_service_by_name(client, env_name, "test2")
# Confirm service is active and the containers are running
service1 = client.wait_success(service1, 300)
service2 = client.wait_success(service2, 300)
assert service1.state == "active"
assert service2.state == "active"
check_config_for_service(client, service1, {"test1": "value1"}, 1)
check_config_for_service(client, service2, {"test2": "value2"}, 1)
container_list1 = get_service_container_list(client, service1)
assert len(container_list1) == 4
for container in container_list1:
assert container.state == "running"
assert container.startCount == 2
container_list2 = get_service_container_list(client, service2)
assert len(container_list2) == 4
for container in container_list2:
assert container.state == "running"
assert container.startCount == 2
delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_service_restart_bat_inter(client,
rancher_compose_container):
    # This method tests the restart command with batch-size and interval options
env_name = random_str().replace("-", "")
# Create an environment using up
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc2.yml", env_name,
"up -d", "Creating stack", "rc2.yml")
env, service1 = get_env_service_by_name(client, env_name, "test1")
env, service2 = get_env_service_by_name(client, env_name, "test2")
# Confirm service is active and the containers are running
service1 = client.wait_success(service1, 300)
service2 = client.wait_success(service2, 300)
assert service1.state == "active"
assert service2.state == "active"
check_config_for_service(client, service1, {"test1": "value1"}, 1)
check_config_for_service(client, service2, {"test2": "value2"}, 1)
container_list1 = get_service_container_list(client, service1)
assert len(container_list1) == 4
for container in container_list1:
assert container.state == "running"
assert container.startCount == 1
container_list2 = get_service_container_list(client, service2)
assert len(container_list2) == 4
    for container in container_list2:
        assert container.state == "running"
        assert container.startCount == 1
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc2.yml", env_name,
"restart --batch-size 2 --interval 100", "Restarting", "rc2.yml")
env, service1 = get_env_service_by_name(client, env_name, "test1")
env, service2 = get_env_service_by_name(client, env_name, "test2")
# Confirm service is active and the containers are running
service1 = client.wait_success(service1, 300)
service2 = client.wait_success(service2, 300)
assert service1.state == "active"
assert service2.state == "active"
check_config_for_service(client, service1, {"test1": "value1"}, 1)
check_config_for_service(client, service2, {"test2": "value2"}, 1)
container_list1 = get_service_container_list(client, service1)
assert len(container_list1) == 4
for container in container_list1:
assert container.state == "running"
assert container.startCount == 2
container_list2 = get_service_container_list(client, service2)
assert len(container_list2) == 4
for container in container_list2:
assert container.state == "running"
assert container.startCount == 2
delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_services_delete(client,
rancher_compose_container):
# This method tests the delete command
env_name = random_str().replace("-", "")
# Create an environment using up
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
"up -d", start_project_str, "rc1.yml")
env, service = get_env_service_by_name(client, env_name, "test1")
# Confirm service is active and the containers are running
assert service.state == "active"
check_config_for_service(client, service, {"test1": "value1"}, 1)
container_list = get_service_container_list(client, service)
assert len(container_list) == 3
for container in container_list:
assert container.state == "running"
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
"rm -f", "Deleting", "rc1.yml")
# Confirm service is removed
service = client.wait_success(service, 300)
assert service.state == "removed"
delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_services_scale(client,
rancher_compose_container):
# This method tests the scale command
env_name = random_str().replace("-", "")
# Create an environment using up
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
"up -d", start_project_str, "rc1.yml")
env, service = get_env_service_by_name(client, env_name, "test1")
# Confirm service is active and the containers are running
assert service.state == "active"
check_config_for_service(client, service, {"test1": "value1"}, 1)
container_list = get_service_container_list(client, service)
assert len(container_list) == 3
for container in container_list:
assert container.state == "running"
# Issue a command to scale up the services
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
"scale test1=4", "Setting scale", "rc1.yml")
# Confirm service is active and the containers are running
service = client.wait_success(service, 300)
assert service.state == "active"
container_list = get_service_container_list(client, service)
    # Check that the number of containers is incremented correctly
assert len(container_list) == 4
for container in container_list:
assert container.state == "running"
# Issue a command to scale down the services
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
"scale test1=3", "Setting scale", "rc1.yml")
# Confirm service is active and the containers are running
service = client.wait_success(service, 300)
assert service.state == "active"
container_list = get_service_container_list(client, service)
    # Check that the number of containers is decremented correctly
assert len(container_list) == 3
for container in container_list:
assert container.state == "running"
delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_services_security(client,
rancher_compose_container,
socat_containers):
# This method tests the options in security tab in the UI
env_name = random_str().replace("-", "")
# Create an environment using up
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc3.yml", env_name,
"up -d", start_project_str, "rc3.yml")
env, service = get_env_service_by_name(client, env_name, "test3")
# Confirm service is active and the containers are running
assert service.state == "active"
container_list = get_service_container_list(client, service)
assert len(container_list) == 3
for con in container_list:
assert con.state == "running"
containers = client.list_container(
externalId=con.externalId,
include="hosts",
removed_null=True)
docker_client = get_docker_client(containers[0].hosts[0])
inspect = docker_client.inspect_container(con.externalId)
logger.info("Checked for containers running " + con.name)
assert inspect["State"]["Running"]
assert inspect["HostConfig"]["Privileged"]
assert inspect["HostConfig"]["Memory"] == 104857600
assert inspect["HostConfig"]["CpuShares"] == 256
assert inspect["HostConfig"]["CapAdd"] == ["AUDIT_CONTROL",
"AUDIT_WRITE"]
assert inspect["HostConfig"]["CapDrop"] == ["BLOCK_SUSPEND",
"CHOWN"]
assert inspect["Config"]["Hostname"] == "rancherhost"
assert inspect["HostConfig"]["PidMode"] == "host"
delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_services_log_driver(client,
rancher_compose_container,
socat_containers):
    # This test case fails because of bug #4773
env_name = random_str().replace("-", "")
# Create an environment using up
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc3.yml", env_name,
"up -d", start_project_str, "rc3.yml")
env, service = get_env_service_by_name(client, env_name, "test3")
# Confirm service is active and the containers are running
assert service.state == "active"
container_list = get_service_container_list(client, service)
assert len(container_list) == 3
for con in container_list:
assert con.state == "running"
containers = client.list_container(
externalId=con.externalId,
include="hosts",
removed_null=True)
docker_client = get_docker_client(containers[0].hosts[0])
inspect = docker_client.inspect_container(con.externalId)
logger.info("Checked for containers running" + con.name)
assert inspect["State"]["Running"]
assert inspect["HostConfig"]["LogConfig"]["Type"] == "syslog"
delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_services_network(client,
rancher_compose_container,
socat_containers):
# This method tests the options in Network tab in the UI
hostname_override = "io.rancher.container.hostname_override"
requested_ip = "io.rancher.container.requested_ip"
env_name = random_str().replace("-", "")
# Create an environment using up
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc4.yml", env_name,
"up -d", start_project_str, "rc4.yml")
env, service = get_env_service_by_name(client, env_name, "test4")
# Confirm service is active and the containers are running
assert service.state == "active"
check_config_for_service(client, service,
{"testrc": "RANCHER_COMPOSE"}, 1)
check_config_for_service(client, service,
{"io.rancher.container.requested_ip":
"209.243.140.21"}, 1)
check_config_for_service(client, service,
{"io.rancher.container.hostname_override":
"container_name"}, 1)
container_list = get_service_container_list(client, service)
assert len(container_list) == 2
for con in container_list:
assert con.state == "running"
containers = client.list_container(
externalId=con.externalId,
include="hosts",
removed_null=True)
docker_client = get_docker_client(containers[0].hosts[0])
inspect = docker_client.inspect_container(con.externalId)
logger.info("Checked for containers running " + con.name)
assert inspect["State"]["Running"]
assert inspect["Config"]["Domainname"] == "xyz.com"
assert \
inspect["Config"]["Labels"][hostname_override] \
== "container_name"
assert inspect["Config"]["Labels"][requested_ip] == "209.243.140.21"
dns_list = inspect["HostConfig"]["Dns"]
dnssearch_list = inspect["HostConfig"]["DnsSearch"]
assert "209.243.150.21" in dns_list
assert "www.google.com" in dnssearch_list
delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_services_volume(client,
rancher_compose_container,
socat_containers):
env_name = random_str().replace("-", "")
# Create an environment using up
launch_rancher_compose_from_file(
client, RCCOMMANDS_SUBDIR, "dc5.yml", env_name,
"up -d", start_project_str, "rc5.yml")
env, service = get_env_service_by_name(client, env_name, "test5")
# Confirm service is active and the containers are running
assert service.state == "active"
container_list = get_service_container_list(client, service)
assert len(container_list) == 2
for con in container_list:
assert con.state == "running"
containers = client.list_container(
externalId=con.externalId,
include="hosts",
removed_null=True)
docker_client = get_docker_client(containers[0].hosts[0])
inspect = docker_client.inspect_container(con.externalId)
logger.info("Checked for containers running " + con.name)
assert inspect["State"]["Running"]
assert "testvol:/home:rw" in inspect["HostConfig"]["Binds"]
delete_all(client, [env])
release/src/dependencies/utils_test.py | udda1996/ballerina-standard-library | Python | 4,831 bytes | Apache-2.0 | 19 stars | hexsha 92e3e4f14f4eadad39fd02eedcf864a56056f371
from sys import modules
import unittest
import utils
IO_MODULE = "module-ballerina-io"
JAVA_ARRAYS_MODULE = "module-ballerina-jballerina.java.arrays"
class TestDashboardCreation(unittest.TestCase):
def test_get_module_short_name(self):
expected = "java.arrays"
actual = utils.get_module_short_name(JAVA_ARRAYS_MODULE)
self.assertEqual(expected, actual)
expected = "io"
actual = utils.get_module_short_name(IO_MODULE)
self.assertEqual(expected, actual)
def test_get_repo_link(self):
expected = "[io](https://github.com/ballerina-platform/module-ballerina-io)"
actual = utils.get_repo_link(IO_MODULE)
self.assertEqual(expected, actual)
def test_get_release_badge(self):
expected = "[](https://github.com/ballerina-platform/module-ballerina-io/releases)"
actual = utils.get_release_badge(IO_MODULE)
self.assertEqual(expected, actual)
def test_get_build_status_badge(self):
expected = "[](https://github.com/ballerina-platform/module-ballerina-io/actions/workflows/build-timestamped-master.yml)"
actual = utils.get_build_status_badge(IO_MODULE)
self.assertEqual(expected, actual)
def test_get_trivy_badge(self):
expected = "[](https://github.com/ballerina-platform/module-ballerina-io/actions/workflows/trivy-scan.yml)"
actual = utils.get_trivy_badge(IO_MODULE)
self.assertEqual(expected, actual)
def test_get_codecov_badge(self):
expected = "[](https://codecov.io/gh/ballerina-platform/module-ballerina-io)"
actual = utils.get_codecov_badge(IO_MODULE, "master")
self.assertEqual(expected, actual)
def test_get_bugs_badge(self):
expected = "[](https://github.com/ballerina-platform/ballerina-standard-library/issues?q=is%3Aopen+label%3Amodule%2Fjava.arrays+label%3AType%2FBug)"
actual = utils.get_bugs_badge(JAVA_ARRAYS_MODULE)
self.assertEqual(expected, actual)
def test_get_pull_requests_badge(self):
expected = "[](https://github.com/ballerina-platform/module-ballerina-io/pulls)"
actual = utils.get_pull_requests_badge(IO_MODULE)
self.assertEqual(expected, actual)
def test_get_row_without_level(self):
module = { "name": JAVA_ARRAYS_MODULE, "default_branch": "master"}
expected = "||[java.arrays](https://github.com/ballerina-platform/module-ballerina-jballerina.java.arrays)|[](https://github.com/ballerina-platform/module-ballerina-jballerina.java.arrays/releases)|[](https://github.com/ballerina-platform/module-ballerina-jballerina.java.arrays/actions/workflows/build-timestamped-master.yml)|[](https://github.com/ballerina-platform/module-ballerina-jballerina.java.arrays/actions/workflows/trivy-scan.yml)|[](https://codecov.io/gh/ballerina-platform/module-ballerina-jballerina.java.arrays)|[](https://github.com/ballerina-platform/ballerina-standard-library/issues?q=is%3Aopen+label%3Amodule%2Fjava.arrays+label%3AType%2FBug)|[](https://github.com/ballerina-platform/module-ballerina-jballerina.java.arrays/pulls)|\n"
actual = utils.get_dashboard_row(module, '')
self.assertEqual(expected, actual)
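# A sketch of the behavior utils.get_module_short_name must implement to
# satisfy the assertions above (an assumption about its logic, not the
# actual implementation): strip the "module-ballerina-" prefix, then drop an
# optional "jballerina." prefix from what remains.
def _get_module_short_name_sketch(module_name):
    short_name = module_name[len("module-ballerina-"):]
    if short_name.startswith("jballerina."):
        short_name = short_name[len("jballerina."):]
    return short_name
# e.g. _get_module_short_name_sketch(JAVA_ARRAYS_MODULE) == "java.arrays"
#      _get_module_short_name_sketch(IO_MODULE) == "io"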
luupsmap/cli/commands/__init__.py | Team-LANS/luups-map | Python | 84 bytes | MIT | 1 star | hexsha 92f9ab88adad2eee32346ffc22f6d384e8629730
from luupsmap.cli.commands.seed import *
from luupsmap.cli.commands.update import *
sdk/lusid/api/instruments_api.py | finbourne/lusid-sdk-python-preview | Python | 111,828 bytes | MIT | 5 stars | hexsha 92fdd9f909c5f5f48fab2499510d45171326ce1b
# coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.3648
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from lusid.api_client import ApiClient
from lusid.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class InstrumentsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def delete_instrument(self, identifier_type, identifier, **kwargs): # noqa: E501
"""[EARLY ACCESS] DeleteInstrument: Delete instrument # noqa: E501
Delete a particular instrument, as identified by a particular instrument identifier. Once deleted, an instrument is marked as inactive and can no longer be referenced when creating or updating transactions or holdings. You can still query existing transactions and holdings related to the deleted instrument. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_instrument(identifier_type, identifier, async_req=True)
>>> result = thread.get()
:param identifier_type: The unique identifier type to search, for example 'Figi'. (required)
:type identifier_type: str
:param identifier: An <i>identifierType</i> value to use to identify the instrument, for example 'BBG000BLNNV0'. (required)
:type identifier: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: DeleteInstrumentResponse
"""
kwargs['_return_http_data_only'] = True
return self.delete_instrument_with_http_info(identifier_type, identifier, **kwargs) # noqa: E501
def delete_instrument_with_http_info(self, identifier_type, identifier, **kwargs): # noqa: E501
"""[EARLY ACCESS] DeleteInstrument: Delete instrument # noqa: E501
Delete a particular instrument, as identified by a particular instrument identifier. Once deleted, an instrument is marked as inactive and can no longer be referenced when creating or updating transactions or holdings. You can still query existing transactions and holdings related to the deleted instrument. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_instrument_with_http_info(identifier_type, identifier, async_req=True)
>>> result = thread.get()
:param identifier_type: The unique identifier type to search, for example 'Figi'. (required)
:type identifier_type: str
:param identifier: An <i>identifierType</i> value to use to identify the instrument, for example 'BBG000BLNNV0'. (required)
:type identifier: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(DeleteInstrumentResponse, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'identifier_type',
'identifier'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_instrument" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
if 'identifier_type' in local_var_params:
path_params['identifierType'] = local_var_params['identifier_type'] # noqa: E501
if 'identifier' in local_var_params:
path_params['identifier'] = local_var_params['identifier'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json', 'text/json']) # noqa: E501
header_params['Accept-Encoding'] = "gzip, deflate, br"
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
response_types_map = {
200: "DeleteInstrumentResponse",
400: "LusidValidationProblemDetails",
}
return self.api_client.call_api(
'/api/instruments/{identifierType}/{identifier}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
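    # Illustrative call patterns for the pair of methods above (hypothetical
    # identifier values, mirroring the docstring examples):
    #
    #   api = InstrumentsApi(ApiClient())
    #   resp = api.delete_instrument("Figi", "BBG000BLNNV0")  # synchronous
    #   thread = api.delete_instrument(
    #       "Figi", "BBG000BLNNV0", async_req=True
    #   )
    #   resp = thread.get()  # resolves the asynchronous request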
def delete_instrument_properties(self, identifier_type, identifier, request_body, **kwargs): # noqa: E501
"""[EXPERIMENTAL] DeleteInstrumentProperties: Delete instrument properties # noqa: E501
Delete one or more properties from a particular instrument. If the properties are time-variant then an effective datetime from which to delete properties must be specified. If the properties are perpetual then it is invalid to specify an effective datetime for deletion. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_instrument_properties(identifier_type, identifier, request_body, async_req=True)
>>> result = thread.get()
:param identifier_type: The unique identifier type to search, for example 'Figi'. (required)
:type identifier_type: str
:param identifier: An <i>identifierType</i> value to use to identify the instrument, for example 'BBG000BLNNV0'. (required)
:type identifier: str
:param request_body: A list of property keys from the 'Instruments' domain whose properties to delete. (required)
:type request_body: list[str]
:param effective_at: The effective datetime or cut label at which to delete time-variant properties from. The property must exist at the specified 'effectiveAt' datetime. If the 'effectiveAt' is not provided or is before the time-variant property exists then a failure is returned. Do not specify this parameter if any of the properties to delete are perpetual.
:type effective_at: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: DeleteInstrumentPropertiesResponse
"""
kwargs['_return_http_data_only'] = True
return self.delete_instrument_properties_with_http_info(identifier_type, identifier, request_body, **kwargs) # noqa: E501
def delete_instrument_properties_with_http_info(self, identifier_type, identifier, request_body, **kwargs): # noqa: E501
"""[EXPERIMENTAL] DeleteInstrumentProperties: Delete instrument properties # noqa: E501
Delete one or more properties from a particular instrument. If the properties are time-variant then an effective datetime from which to delete properties must be specified. If the properties are perpetual then it is invalid to specify an effective datetime for deletion. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_instrument_properties_with_http_info(identifier_type, identifier, request_body, async_req=True)
>>> result = thread.get()
:param identifier_type: The unique identifier type to search, for example 'Figi'. (required)
:type identifier_type: str
:param identifier: An <i>identifierType</i> value to use to identify the instrument, for example 'BBG000BLNNV0'. (required)
:type identifier: str
:param request_body: A list of property keys from the 'Instruments' domain whose properties to delete. (required)
:type request_body: list[str]
:param effective_at: The effective datetime or cut label at which to delete time-variant properties from. The property must exist at the specified 'effectiveAt' datetime. If the 'effectiveAt' is not provided or is before the time-variant property exists then a failure is returned. Do not specify this parameter if any of the properties to delete are perpetual.
:type effective_at: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(DeleteInstrumentPropertiesResponse, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'identifier_type',
'identifier',
'request_body',
'effective_at'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_instrument_properties" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'request_body' is set
if self.api_client.client_side_validation and ('request_body' not in local_var_params or # noqa: E501
local_var_params['request_body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `request_body` when calling `delete_instrument_properties`") # noqa: E501
collection_formats = {}
path_params = {}
if 'identifier_type' in local_var_params:
path_params['identifierType'] = local_var_params['identifier_type'] # noqa: E501
if 'identifier' in local_var_params:
path_params['identifier'] = local_var_params['identifier'] # noqa: E501
query_params = []
if 'effective_at' in local_var_params and local_var_params['effective_at'] is not None: # noqa: E501
query_params.append(('effectiveAt', local_var_params['effective_at'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'request_body' in local_var_params:
body_params = local_var_params['request_body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json', 'text/json']) # noqa: E501
header_params['Accept-Encoding'] = "gzip, deflate, br"
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/json', 'text/json', 'application/*+json']) # noqa: E501
# set the LUSID header
header_params['X-LUSID-SDK-Language'] = 'Python'
header_params['X-LUSID-SDK-Version'] = '0.11.3648'
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
response_types_map = {
200: "DeleteInstrumentPropertiesResponse",
400: "LusidValidationProblemDetails",
}
return self.api_client.call_api(
'/api/instruments/{identifierType}/{identifier}/properties/$delete', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def get_instrument(self, identifier_type, identifier, **kwargs): # noqa: E501
"""GetInstrument: Get instrument # noqa: E501
Retrieve the definition of a particular instrument, as identified by a particular unique identifier. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_instrument(identifier_type, identifier, async_req=True)
>>> result = thread.get()
:param identifier_type: The unique identifier type to use, for example 'Figi'. (required)
:type identifier_type: str
:param identifier: An <i>identifierType</i> value to use to identify the instrument, for example 'BBG000BLNNV0'. (required)
:type identifier: str
:param effective_at: The effective datetime or cut label at which to retrieve the instrument. Defaults to the current LUSID system datetime if not specified.
:type effective_at: str
:param as_at: The asAt datetime at which to retrieve the instrument. Defaults to returning the latest version if not specified.
:type as_at: datetime
:param property_keys: A list of property keys from the 'Instrument' domain to decorate onto the instrument. These must have the format {domain}/{scope}/{code}, for example 'Instrument/system/Name'.
:type property_keys: list[str]
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: Instrument
"""
kwargs['_return_http_data_only'] = True
return self.get_instrument_with_http_info(identifier_type, identifier, **kwargs) # noqa: E501
def get_instrument_with_http_info(self, identifier_type, identifier, **kwargs): # noqa: E501
"""GetInstrument: Get instrument # noqa: E501
Retrieve the definition of a particular instrument, as identified by a particular unique identifier. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_instrument_with_http_info(identifier_type, identifier, async_req=True)
>>> result = thread.get()
:param identifier_type: The unique identifier type to use, for example 'Figi'. (required)
:type identifier_type: str
:param identifier: An <i>identifierType</i> value to use to identify the instrument, for example 'BBG000BLNNV0'. (required)
:type identifier: str
:param effective_at: The effective datetime or cut label at which to retrieve the instrument. Defaults to the current LUSID system datetime if not specified.
:type effective_at: str
:param as_at: The asAt datetime at which to retrieve the instrument. Defaults to returning the latest version if not specified.
:type as_at: datetime
:param property_keys: A list of property keys from the 'Instrument' domain to decorate onto the instrument. These must have the format {domain}/{scope}/{code}, for example 'Instrument/system/Name'.
:type property_keys: list[str]
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without the HTTP status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(Instrument, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'identifier_type',
'identifier',
'effective_at',
'as_at',
'property_keys'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_instrument" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
if 'identifier_type' in local_var_params:
path_params['identifierType'] = local_var_params['identifier_type'] # noqa: E501
if 'identifier' in local_var_params:
path_params['identifier'] = local_var_params['identifier'] # noqa: E501
query_params = []
if 'effective_at' in local_var_params and local_var_params['effective_at'] is not None: # noqa: E501
query_params.append(('effectiveAt', local_var_params['effective_at'])) # noqa: E501
if 'as_at' in local_var_params and local_var_params['as_at'] is not None: # noqa: E501
query_params.append(('asAt', local_var_params['as_at'])) # noqa: E501
if 'property_keys' in local_var_params and local_var_params['property_keys'] is not None: # noqa: E501
query_params.append(('propertyKeys', local_var_params['property_keys'])) # noqa: E501
collection_formats['propertyKeys'] = 'multi' # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json', 'text/json']) # noqa: E501
header_params['Accept-Encoding'] = "gzip, deflate, br"
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
response_types_map = {
200: "Instrument",
400: "LusidValidationProblemDetails",
}
return self.api_client.call_api(
'/api/instruments/{identifierType}/{identifier}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
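# --- Hedged usage sketch (editorial addition, not generated code) ---
# Assumes a lusid.ApiClient already configured with oauth2 credentials;
# the FIGI 'BBG000BLNNV0' mirrors the hypothetical value used in the
# docstring examples above.
#
#   import lusid
#
#   api_client = lusid.ApiClient(lusid.Configuration())
#   instruments_api = lusid.InstrumentsApi(api_client)
#
#   # Synchronous call, decorating the instrument's display name property.
#   instrument = instruments_api.get_instrument(
#       identifier_type='Figi',
#       identifier='BBG000BLNNV0',
#       property_keys=['Instrument/system/Name'])
#   print(instrument)
#
#   # Asynchronous variant: pass async_req=True and join the thread.
#   thread = instruments_api.get_instrument(
#       identifier_type='Figi', identifier='BBG000BLNNV0', async_req=True)
#   instrument = thread.get()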
def get_instrument_identifier_types(self, **kwargs): # noqa: E501
"""[EARLY ACCESS] GetInstrumentIdentifierTypes: Get instrument identifier types # noqa: E501
Retrieve a list of all valid instrument identifier types and whether they are unique or not. An instrument must have a value for at least one unique identifier type (it can have more than one unique type and value). In addition, a value is automatically generated for a LUSID Instrument ID (LUID) unique type by the system. An instrument can have values for multiple non-unique identifier types (or it can have zero non-unique types and values). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_instrument_identifier_types(async_req=True)
>>> result = thread.get()
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: ResourceListOfInstrumentIdTypeDescriptor
"""
kwargs['_return_http_data_only'] = True
return self.get_instrument_identifier_types_with_http_info(**kwargs) # noqa: E501
def get_instrument_identifier_types_with_http_info(self, **kwargs): # noqa: E501
"""[EARLY ACCESS] GetInstrumentIdentifierTypes: Get instrument identifier types # noqa: E501
Retrieve a list of all valid instrument identifier types and whether they are unique or not. An instrument must have a value for at least one unique identifier type (it can have more than one unique type and value). In addition, a value is automatically generated for a LUSID Instrument ID (LUID) unique type by the system. An instrument can have values for multiple non-unique identifier types (or it can have zero non-unique types and values). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_instrument_identifier_types_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without the HTTP status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(ResourceListOfInstrumentIdTypeDescriptor, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_instrument_identifier_types" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json', 'text/json']) # noqa: E501
header_params['Accept-Encoding'] = "gzip, deflate, br"
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
response_types_map = {
200: "ResourceListOfInstrumentIdTypeDescriptor",
}
return self.api_client.call_api(
'/api/instruments/identifierTypes', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
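# Hedged sketch: listing the valid instrument identifier types (reuses the
# hypothetical instruments_api from the first sketch; 'values' is the
# standard collection field on ResourceListOf* response models):
#
#   id_types = instruments_api.get_instrument_identifier_types()
#   for descriptor in id_types.values:
#       print(descriptor)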
def get_instrument_properties(self, identifier_type, identifier, **kwargs): # noqa: E501
"""[EXPERIMENTAL] GetInstrumentProperties: Get instrument properties # noqa: E501
List all the properties of a particular instrument, as identified by a particular unique identifier. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_instrument_properties(identifier_type, identifier, async_req=True)
>>> result = thread.get()
:param identifier_type: The unique identifier type to search, for example 'Figi'. (required)
:type identifier_type: str
:param identifier: An <i>identifierType</i> value to use to identify the instrument, for example 'BBG000BLNNV0'. (required)
:type identifier: str
:param effective_at: The effective datetime or cut label at which to list the instrument's properties. Defaults to the current LUSID system datetime if not specified.
:type effective_at: str
:param as_at: The asAt datetime at which to list the instrument's properties. Defaults to returning the latest version of each property if not specified.
:type as_at: datetime
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: InstrumentProperties
"""
kwargs['_return_http_data_only'] = True
return self.get_instrument_properties_with_http_info(identifier_type, identifier, **kwargs) # noqa: E501
def get_instrument_properties_with_http_info(self, identifier_type, identifier, **kwargs): # noqa: E501
"""[EXPERIMENTAL] GetInstrumentProperties: Get instrument properties # noqa: E501
List all the properties of a particular instrument, as identified by a particular unique identifier. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_instrument_properties_with_http_info(identifier_type, identifier, async_req=True)
>>> result = thread.get()
:param identifier_type: The unique identifier type to search, for example 'Figi'. (required)
:type identifier_type: str
:param identifier: An <i>identifierType</i> value to use to identify the instrument, for example 'BBG000BLNNV0'. (required)
:type identifier: str
:param effective_at: The effective datetime or cut label at which to list the instrument's properties. Defaults to the current LUSID system datetime if not specified.
:type effective_at: str
:param as_at: The asAt datetime at which to list the instrument's properties. Defaults to returning the latest version of each property if not specified.
:type as_at: datetime
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without the HTTP status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(InstrumentProperties, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'identifier_type',
'identifier',
'effective_at',
'as_at'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_instrument_properties" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
if 'identifier_type' in local_var_params:
path_params['identifierType'] = local_var_params['identifier_type'] # noqa: E501
if 'identifier' in local_var_params:
path_params['identifier'] = local_var_params['identifier'] # noqa: E501
query_params = []
if 'effective_at' in local_var_params and local_var_params['effective_at'] is not None: # noqa: E501
query_params.append(('effectiveAt', local_var_params['effective_at'])) # noqa: E501
if 'as_at' in local_var_params and local_var_params['as_at'] is not None: # noqa: E501
query_params.append(('asAt', local_var_params['as_at'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json', 'text/json']) # noqa: E501
header_params['Accept-Encoding'] = "gzip, deflate, br"
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
response_types_map = {
200: "InstrumentProperties",
400: "LusidValidationProblemDetails",
}
return self.api_client.call_api(
'/api/instruments/{identifierType}/{identifier}/properties', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
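# Hedged sketch: listing an instrument's properties at a specific effective
# datetime (the effective_at value is hypothetical; reuses instruments_api
# from the first sketch):
#
#   properties = instruments_api.get_instrument_properties(
#       identifier_type='Figi',
#       identifier='BBG000BLNNV0',
#       effective_at='2021-03-01T00:00:00.0000000+00:00')
#   print(properties)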
def get_instrument_property_time_series(self, identifier_type, identifier, property_key, **kwargs): # noqa: E501
"""[EARLY ACCESS] GetInstrumentPropertyTimeSeries: Get instrument property time series # noqa: E501
Retrieve the complete time series (history) for a particular property of an instrument. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_instrument_property_time_series(identifier_type, identifier, property_key, async_req=True)
>>> result = thread.get()
:param identifier_type: The unique identifier type to search, for example 'Figi'. (required)
:type identifier_type: str
:param identifier: An <i>identifierType</i> value to use to identify the instrument, for example 'BBG000BLNNV0'. (required)
:type identifier: str
:param property_key: The property key of a property from the 'Instrument' domain whose history to retrieve. This must have the format {domain}/{scope}/{code}, for example 'Instrument/system/Name'. (required)
:type property_key: str
:param identifier_effective_at: The effective datetime used to resolve the instrument from the identifier. Defaults to the current LUSID system datetime if not specified.
:type identifier_effective_at: str
:param as_at: The asAt datetime at which to retrieve the instrument's property history. Defaults to returning the current datetime if not supplied.
:type as_at: datetime
:param filter: Expression to filter the results. For more information about filtering, see https://support.lusid.com/knowledgebase/article/KA-01914.
:type filter: str
:param page: The pagination token to use to continue listing properties; this value is returned from the previous call. If a pagination token is provided, the <i>filter</i>, <i>effectiveAt</i> and <i>asAt</i> fields must not have changed since the original request. For more information, see https://support.lusid.com/knowledgebase/article/KA-01915.
:type page: str
:param limit: When paginating, limit the results to this number.
:type limit: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: ResourceListOfPropertyInterval
"""
kwargs['_return_http_data_only'] = True
return self.get_instrument_property_time_series_with_http_info(identifier_type, identifier, property_key, **kwargs) # noqa: E501
def get_instrument_property_time_series_with_http_info(self, identifier_type, identifier, property_key, **kwargs): # noqa: E501
"""[EARLY ACCESS] GetInstrumentPropertyTimeSeries: Get instrument property time series # noqa: E501
Retrieve the complete time series (history) for a particular property of an instrument. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_instrument_property_time_series_with_http_info(identifier_type, identifier, property_key, async_req=True)
>>> result = thread.get()
:param identifier_type: The unique identifier type to search, for example 'Figi'. (required)
:type identifier_type: str
:param identifier: An <i>identifierType</i> value to use to identify the instrument, for example 'BBG000BLNNV0'. (required)
:type identifier: str
:param property_key: The property key of a property from the 'Instrument' domain whose history to retrieve. This must have the format {domain}/{scope}/{code}, for example 'Instrument/system/Name'. (required)
:type property_key: str
:param identifier_effective_at: The effective datetime used to resolve the instrument from the identifier. Defaults to the current LUSID system datetime if not specified.
:type identifier_effective_at: str
:param as_at: The asAt datetime at which to retrieve the instrument's property history. Defaults to returning the current datetime if not supplied.
:type as_at: datetime
:param filter: Expression to filter the results. For more information about filtering, see https://support.lusid.com/knowledgebase/article/KA-01914.
:type filter: str
:param page: The pagination token to use to continue listing properties; this value is returned from the previous call. If a pagination token is provided, the <i>filter</i>, <i>effectiveAt</i> and <i>asAt</i> fields must not have changed since the original request. For more information, see https://support.lusid.com/knowledgebase/article/KA-01915.
:type page: str
:param limit: When paginating, limit the results to this number.
:type limit: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without the HTTP status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(ResourceListOfPropertyInterval, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'identifier_type',
'identifier',
'property_key',
'identifier_effective_at',
'as_at',
'filter',
'page',
'limit'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_instrument_property_time_series" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'property_key' is set
if self.api_client.client_side_validation and ('property_key' not in local_var_params or # noqa: E501
local_var_params['property_key'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `property_key` when calling `get_instrument_property_time_series`") # noqa: E501
if self.api_client.client_side_validation and 'limit' in local_var_params and local_var_params['limit'] > 5000: # noqa: E501
raise ApiValueError("Invalid value for parameter `limit` when calling `get_instrument_property_time_series`, must be a value less than or equal to `5000`") # noqa: E501
if self.api_client.client_side_validation and 'limit' in local_var_params and local_var_params['limit'] < 1: # noqa: E501
raise ApiValueError("Invalid value for parameter `limit` when calling `get_instrument_property_time_series`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'identifier_type' in local_var_params:
path_params['identifierType'] = local_var_params['identifier_type'] # noqa: E501
if 'identifier' in local_var_params:
path_params['identifier'] = local_var_params['identifier'] # noqa: E501
query_params = []
if 'property_key' in local_var_params and local_var_params['property_key'] is not None: # noqa: E501
query_params.append(('propertyKey', local_var_params['property_key'])) # noqa: E501
if 'identifier_effective_at' in local_var_params and local_var_params['identifier_effective_at'] is not None: # noqa: E501
query_params.append(('identifierEffectiveAt', local_var_params['identifier_effective_at'])) # noqa: E501
if 'as_at' in local_var_params and local_var_params['as_at'] is not None: # noqa: E501
query_params.append(('asAt', local_var_params['as_at'])) # noqa: E501
if 'filter' in local_var_params and local_var_params['filter'] is not None: # noqa: E501
query_params.append(('filter', local_var_params['filter'])) # noqa: E501
if 'page' in local_var_params and local_var_params['page'] is not None: # noqa: E501
query_params.append(('page', local_var_params['page'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json', 'text/json']) # noqa: E501
header_params['Accept-Encoding'] = "gzip, deflate, br"
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
response_types_map = {
200: "ResourceListOfPropertyInterval",
400: "LusidValidationProblemDetails",
}
return self.api_client.call_api(
'/api/instruments/{identifierType}/{identifier}/properties/time-series', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
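# Hedged sketch: pulling the history of a single property, capped at 100
# intervals per page (the property key and FIGI are hypothetical; 'values'
# is assumed on ResourceListOfPropertyInterval, as on the other list models):
#
#   history = instruments_api.get_instrument_property_time_series(
#       identifier_type='Figi',
#       identifier='BBG000BLNNV0',
#       property_key='Instrument/system/Name',
#       limit=100)
#   for interval in history.values:
#       print(interval)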
def get_instruments(self, identifier_type, request_body, **kwargs): # noqa: E501
"""GetInstruments: Get instruments # noqa: E501
Retrieve the definition of one or more instruments, as identified by a collection of unique identifiers. Note that to retrieve all the instruments in the instrument master, use the List instruments endpoint instead. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_instruments(identifier_type, request_body, async_req=True)
>>> result = thread.get()
:param identifier_type: The unique identifier type to use, for example 'Figi'. (required)
:type identifier_type: str
:param request_body: A list of one or more <i>identifierType</i> values to use to identify instruments. (required)
:type request_body: list[str]
:param effective_at: The effective datetime or cut label at which to retrieve the instrument definitions. Defaults to the current LUSID system datetime if not specified.
:type effective_at: str
:param as_at: The asAt datetime at which to retrieve the instrument definitions. Defaults to returning the latest version of each instrument definition if not specified.
:type as_at: datetime
:param property_keys: A list of property keys from the 'Instrument' domain to decorate onto the instrument. These must have the format {domain}/{scope}/{code}, for example 'Instrument/system/Name'.
:type property_keys: list[str]
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: GetInstrumentsResponse
"""
kwargs['_return_http_data_only'] = True
return self.get_instruments_with_http_info(identifier_type, request_body, **kwargs) # noqa: E501
def get_instruments_with_http_info(self, identifier_type, request_body, **kwargs): # noqa: E501
"""GetInstruments: Get instruments # noqa: E501
Retrieve the definition of one or more instruments, as identified by a collection of unique identifiers. Note that to retrieve all the instruments in the instrument master, use the List instruments endpoint instead. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_instruments_with_http_info(identifier_type, request_body, async_req=True)
>>> result = thread.get()
:param identifier_type: The unique identifier type to use, for example 'Figi'. (required)
:type identifier_type: str
:param request_body: A list of one or more <i>identifierType</i> values to use to identify instruments. (required)
:type request_body: list[str]
:param effective_at: The effective datetime or cut label at which to retrieve the instrument definitions. Defaults to the current LUSID system datetime if not specified.
:type effective_at: str
:param as_at: The asAt datetime at which to retrieve the instrument definitions. Defaults to returning the latest version of each instrument definition if not specified.
:type as_at: datetime
:param property_keys: A list of property keys from the 'Instrument' domain to decorate onto the instrument. These must have the format {domain}/{scope}/{code}, for example 'Instrument/system/Name'.
:type property_keys: list[str]
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without the HTTP status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(GetInstrumentsResponse, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'identifier_type',
'request_body',
'effective_at',
'as_at',
'property_keys'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_instruments" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'identifier_type' is set
if self.api_client.client_side_validation and ('identifier_type' not in local_var_params or # noqa: E501
local_var_params['identifier_type'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `identifier_type` when calling `get_instruments`") # noqa: E501
# verify the required parameter 'request_body' is set
if self.api_client.client_side_validation and ('request_body' not in local_var_params or # noqa: E501
local_var_params['request_body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `request_body` when calling `get_instruments`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'identifier_type' in local_var_params and local_var_params['identifier_type'] is not None: # noqa: E501
query_params.append(('identifierType', local_var_params['identifier_type'])) # noqa: E501
if 'effective_at' in local_var_params and local_var_params['effective_at'] is not None: # noqa: E501
query_params.append(('effectiveAt', local_var_params['effective_at'])) # noqa: E501
if 'as_at' in local_var_params and local_var_params['as_at'] is not None: # noqa: E501
query_params.append(('asAt', local_var_params['as_at'])) # noqa: E501
if 'property_keys' in local_var_params and local_var_params['property_keys'] is not None: # noqa: E501
query_params.append(('propertyKeys', local_var_params['property_keys'])) # noqa: E501
collection_formats['propertyKeys'] = 'multi' # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'request_body' in local_var_params:
body_params = local_var_params['request_body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json', 'text/json']) # noqa: E501
header_params['Accept-Encoding'] = "gzip, deflate, br"
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/json', 'text/json', 'application/*+json']) # noqa: E501
# set the LUSID header
header_params['X-LUSID-SDK-Language'] = 'Python'
header_params['X-LUSID-SDK-Version'] = '0.11.3648'
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
response_types_map = {
200: "GetInstrumentsResponse",
400: "LusidValidationProblemDetails",
}
return self.api_client.call_api(
'/api/instruments/$get', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
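# Hedged sketch: resolving a batch of instruments by FIGI. The 'values' and
# 'failed' fields are assumptions based on the GetInstrumentsResponse model
# name and the success/failure split described for upserts below; both FIGIs
# are hypothetical:
#
#   response = instruments_api.get_instruments(
#       identifier_type='Figi',
#       request_body=['BBG000BLNNV0', 'BBG000B9XRY4'])
#   for figi, instrument in response.values.items():
#       print('resolved:', figi, instrument.name)
#   for figi, error in response.failed.items():
#       print('unresolved:', figi, error)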
def list_instrument_properties(self, identifier_type, identifier, **kwargs): # noqa: E501
"""[EXPERIMENTAL] ListInstrumentProperties: Get instrument properties (with Pagination) # noqa: E501
List all the properties of a particular instrument, as identified by a particular unique identifier. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_instrument_properties(identifier_type, identifier, async_req=True)
>>> result = thread.get()
:param identifier_type: The unique identifier type to search, for example 'Figi'. (required)
:type identifier_type: str
:param identifier: An <i>identifierType</i> value to use to identify the instrument, for example 'BBG000BLNNV0'. (required)
:type identifier: str
:param effective_at: The effective datetime or cut label at which to list the instrument's properties. Defaults to the current LUSID system datetime if not specified.
:type effective_at: str
:param as_at: The asAt datetime at which to list the instrument's properties. Defaults to returning the latest version of each property if not specified.
:type as_at: datetime
:param page: The pagination token to use to continue listing properties; this value is returned from the previous call.
:type page: str
:param limit: When paginating, limit the results per page to this number.
:type limit: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: ResourceListOfProperty
"""
kwargs['_return_http_data_only'] = True
return self.list_instrument_properties_with_http_info(identifier_type, identifier, **kwargs) # noqa: E501
def list_instrument_properties_with_http_info(self, identifier_type, identifier, **kwargs): # noqa: E501
"""[EXPERIMENTAL] ListInstrumentProperties: Get instrument properties (with Pagination) # noqa: E501
List all the properties of a particular instrument, as identified by a particular unique identifier. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_instrument_properties_with_http_info(identifier_type, identifier, async_req=True)
>>> result = thread.get()
:param identifier_type: The unique identifier type to search, for example 'Figi'. (required)
:type identifier_type: str
:param identifier: An <i>identifierType</i> value to use to identify the instrument, for example 'BBG000BLNNV0'. (required)
:type identifier: str
:param effective_at: The effective datetime or cut label at which to list the instrument's properties. Defaults to the current LUSID system datetime if not specified.
:type effective_at: str
:param as_at: The asAt datetime at which to list the instrument's properties. Defaults to returning the latest version of each property if not specified.
:type as_at: datetime
:param page: The pagination token to use to continue listing properties; this value is returned from the previous call.
:type page: str
:param limit: When paginating, limit the results per page to this number.
:type limit: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without the HTTP status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(ResourceListOfProperty, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'identifier_type',
'identifier',
'effective_at',
'as_at',
'page',
'limit'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_instrument_properties" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('effective_at' in local_var_params and # noqa: E501
len(local_var_params['effective_at']) > 256): # noqa: E501
raise ApiValueError("Invalid value for parameter `effective_at` when calling `list_instrument_properties`, length must be less than or equal to `256`") # noqa: E501
if self.api_client.client_side_validation and ('effective_at' in local_var_params and # noqa: E501
len(local_var_params['effective_at']) < 0): # noqa: E501
raise ApiValueError("Invalid value for parameter `effective_at` when calling `list_instrument_properties`, length must be greater than or equal to `0`") # noqa: E501
if self.api_client.client_side_validation and 'effective_at' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_\+:\.]+$', local_var_params['effective_at']): # noqa: E501
raise ApiValueError("Invalid value for parameter `effective_at` when calling `list_instrument_properties`, must conform to the pattern `/^[a-zA-Z0-9\-_\+:\.]+$/`") # noqa: E501
if self.api_client.client_side_validation and ('page' in local_var_params and # noqa: E501
len(local_var_params['page']) > 500): # noqa: E501
raise ApiValueError("Invalid value for parameter `page` when calling `list_instrument_properties`, length must be less than or equal to `500`") # noqa: E501
if self.api_client.client_side_validation and ('page' in local_var_params and # noqa: E501
len(local_var_params['page']) < 1): # noqa: E501
raise ApiValueError("Invalid value for parameter `page` when calling `list_instrument_properties`, length must be greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and 'page' in local_var_params and not re.search(r'^[a-zA-Z0-9\+\/]*={0,3}$', local_var_params['page']): # noqa: E501
raise ApiValueError("Invalid value for parameter `page` when calling `list_instrument_properties`, must conform to the pattern `/^[a-zA-Z0-9\+\/]*={0,3}$/`") # noqa: E501
if self.api_client.client_side_validation and 'limit' in local_var_params and local_var_params['limit'] > 5000: # noqa: E501
raise ApiValueError("Invalid value for parameter `limit` when calling `list_instrument_properties`, must be a value less than or equal to `5000`") # noqa: E501
if self.api_client.client_side_validation and 'limit' in local_var_params and local_var_params['limit'] < 1: # noqa: E501
raise ApiValueError("Invalid value for parameter `limit` when calling `list_instrument_properties`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'identifier_type' in local_var_params:
path_params['identifierType'] = local_var_params['identifier_type'] # noqa: E501
if 'identifier' in local_var_params:
path_params['identifier'] = local_var_params['identifier'] # noqa: E501
query_params = []
if 'effective_at' in local_var_params and local_var_params['effective_at'] is not None: # noqa: E501
query_params.append(('effectiveAt', local_var_params['effective_at'])) # noqa: E501
if 'as_at' in local_var_params and local_var_params['as_at'] is not None: # noqa: E501
query_params.append(('asAt', local_var_params['as_at'])) # noqa: E501
if 'page' in local_var_params and local_var_params['page'] is not None: # noqa: E501
query_params.append(('page', local_var_params['page'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json', 'text/json']) # noqa: E501
header_params['Accept-Encoding'] = "gzip, deflate, br"
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
response_types_map = {
200: "ResourceListOfProperty",
400: "LusidValidationProblemDetails",
}
return self.api_client.call_api(
'/api/instruments/{identifierType}/{identifier}/properties/list', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
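# Hedged sketch: fetching one page of an instrument's properties. Passing the
# page token returned by the previous call continues the listing, as the
# docstring above describes (first-page call shown; identifiers hypothetical):
#
#   page_of_properties = instruments_api.list_instrument_properties(
#       identifier_type='Figi',
#       identifier='BBG000BLNNV0',
#       limit=500)
#   for prop in page_of_properties.values:
#       print(prop)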
def list_instruments(self, **kwargs): # noqa: E501
"""[EARLY ACCESS] ListInstruments: List instruments # noqa: E501
List all the instruments in the instrument master. To retrieve a particular set of instruments instead, use the Get instruments endpoint. The maximum number of instruments that this method can list per request is 2,000. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_instruments(async_req=True)
>>> result = thread.get()
:param as_at: The asAt datetime at which to list instruments. Defaults to returning the latest version of each instrument if not specified.
:type as_at: datetime
:param effective_at: The effective datetime or cut label at which to list instruments. Defaults to the current LUSID system datetime if not specified.
:type effective_at: str
:param page: The pagination token to use to continue listing instruments; this value is returned from the previous call. If a pagination token is provided, the <i>sortBy</i>, <i>filter</i>, <i>effectiveAt</i> and <i>asAt</i> fields must not have changed since the original request. Also, a <i>start</i> value cannot be provided. For more information, see https://support.lusid.com/knowledgebase/article/KA-01915.
:type page: str
:param sort_by: Order results by particular fields. Use the '-' sign to denote descending order, for example '-MyFieldName'.
:type sort_by: list[str]
:param start: When paginating, skip this number of results.
:type start: int
:param limit: When paginating, limit the results to this number.
:type limit: int
:param filter: Expression to filter the result set. Defaults to filtering out inactive instruments (that is, those that have been deleted). For more information about filtering results, see https://support.lusid.com/knowledgebase/article/KA-01914.
:type filter: str
:param instrument_property_keys: A list of property keys from the 'Instrument' domain to decorate onto instruments. These must have the format {domain}/{scope}/{code}, for example 'Instrument/system/Name'.
:type instrument_property_keys: list[str]
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: PagedResourceListOfInstrument
"""
kwargs['_return_http_data_only'] = True
return self.list_instruments_with_http_info(**kwargs) # noqa: E501
def list_instruments_with_http_info(self, **kwargs): # noqa: E501
"""[EARLY ACCESS] ListInstruments: List instruments # noqa: E501
List all the instruments in the instrument master. To retrieve a particular set of instruments instead, use the Get instruments endpoint. The maximum number of instruments that this method can list per request is 2,000. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_instruments_with_http_info(async_req=True)
>>> result = thread.get()
:param as_at: The asAt datetime at which to list instruments. Defaults to returning the latest version of each instrument if not specified.
:type as_at: datetime
:param effective_at: The effective datetime or cut label at which to list instruments. Defaults to the current LUSID system datetime if not specified.
:type effective_at: str
:param page: The pagination token to use to continue listing instruments; this value is returned from the previous call. If a pagination token is provided, the <i>sortBy</i>, <i>filter</i>, <i>effectiveAt</i> and <i>asAt</i> fields must not have changed since the original request. Also, a <i>start</i> value cannot be provided. For more information, see https://support.lusid.com/knowledgebase/article/KA-01915.
:type page: str
:param sort_by: Order results by particular fields. Use the '-' sign to denote descending order, for example '-MyFieldName'.
:type sort_by: list[str]
:param start: When paginating, skip this number of results.
:type start: int
:param limit: When paginating, limit the results to this number.
:type limit: int
:param filter: Expression to filter the result set. Defaults to filtering out inactive instruments (that is, those that have been deleted). For more information about filtering results, see https://support.lusid.com/knowledgebase/article/KA-01914.
:type filter: str
:param instrument_property_keys: A list of property keys from the 'Instrument' domain to decorate onto instruments. These must have the format {domain}/{scope}/{code}, for example 'Instrument/system/Name'.
:type instrument_property_keys: list[str]
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without the HTTP status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(PagedResourceListOfInstrument, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'as_at',
'effective_at',
'page',
'sort_by',
'start',
'limit',
'filter',
'instrument_property_keys'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_instruments" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and 'limit' in local_var_params and local_var_params['limit'] > 5000: # noqa: E501
raise ApiValueError("Invalid value for parameter `limit` when calling `list_instruments`, must be a value less than or equal to `5000`") # noqa: E501
if self.api_client.client_side_validation and 'limit' in local_var_params and local_var_params['limit'] < 1: # noqa: E501
raise ApiValueError("Invalid value for parameter `limit` when calling `list_instruments`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'as_at' in local_var_params and local_var_params['as_at'] is not None: # noqa: E501
query_params.append(('asAt', local_var_params['as_at'])) # noqa: E501
if 'effective_at' in local_var_params and local_var_params['effective_at'] is not None: # noqa: E501
query_params.append(('effectiveAt', local_var_params['effective_at'])) # noqa: E501
if 'page' in local_var_params and local_var_params['page'] is not None: # noqa: E501
query_params.append(('page', local_var_params['page'])) # noqa: E501
if 'sort_by' in local_var_params and local_var_params['sort_by'] is not None: # noqa: E501
query_params.append(('sortBy', local_var_params['sort_by'])) # noqa: E501
collection_formats['sortBy'] = 'multi' # noqa: E501
if 'start' in local_var_params and local_var_params['start'] is not None: # noqa: E501
query_params.append(('start', local_var_params['start'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'filter' in local_var_params and local_var_params['filter'] is not None: # noqa: E501
query_params.append(('filter', local_var_params['filter'])) # noqa: E501
if 'instrument_property_keys' in local_var_params and local_var_params['instrument_property_keys'] is not None: # noqa: E501
query_params.append(('instrumentPropertyKeys', local_var_params['instrument_property_keys'])) # noqa: E501
collection_formats['instrumentPropertyKeys'] = 'multi' # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json', 'text/json']) # noqa: E501
header_params['Accept-Encoding'] = "gzip, deflate, br"
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
response_types_map = {
200: "PagedResourceListOfInstrument",
400: "LusidValidationProblemDetails",
}
return self.api_client.call_api(
'/api/instruments', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
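# Hedged sketch: walking the whole instrument master page by page. The
# 'next_page' token field is an assumption based on the
# PagedResourceListOfInstrument model name; per the docstring, 'start'
# must not be combined with a page token, so only 'page' is resupplied:
#
#   page_token = None
#   while True:
#       kwargs = {'page': page_token} if page_token else {}
#       page = instruments_api.list_instruments(limit=2000, **kwargs)
#       for instrument in page.values:
#           print(instrument.lusid_instrument_id, instrument.name)
#       page_token = page.next_page
#       if not page_token:
#           break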
def update_instrument_identifier(self, identifier_type, identifier, update_instrument_identifier_request, **kwargs): # noqa: E501
"""[EARLY ACCESS] UpdateInstrumentIdentifier: Update instrument identifier # noqa: E501
Create, update or delete a particular instrument identifier for an instrument. To delete the identifier, leave the value unspecified in the request. If not being deleted, the identifier is updated if it exists and created if it does not. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_instrument_identifier(identifier_type, identifier, update_instrument_identifier_request, async_req=True)
>>> result = thread.get()
:param identifier_type: The unique identifier type to search, for example 'Figi'. (required)
:type identifier_type: str
:param identifier: An <i>identifierType</i> value to use to identify the instrument, for example 'BBG000BLNNV0'. (required)
:type identifier: str
:param update_instrument_identifier_request: The identifier to update or delete. This need not be the same value as the 'identifier' parameter used to retrieve the instrument. (required)
:type update_instrument_identifier_request: UpdateInstrumentIdentifierRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: Instrument
"""
kwargs['_return_http_data_only'] = True
return self.update_instrument_identifier_with_http_info(identifier_type, identifier, update_instrument_identifier_request, **kwargs) # noqa: E501
def update_instrument_identifier_with_http_info(self, identifier_type, identifier, update_instrument_identifier_request, **kwargs): # noqa: E501
"""[EARLY ACCESS] UpdateInstrumentIdentifier: Update instrument identifier # noqa: E501
Create, update or delete a particular instrument identifier for an instrument. To delete the identifier, leave the value unspecified in the request. If not being deleted, the identifier is updated if it exists and created if it does not. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_instrument_identifier_with_http_info(identifier_type, identifier, update_instrument_identifier_request, async_req=True)
>>> result = thread.get()
:param identifier_type: The unique identifier type to search, for example 'Figi'. (required)
:type identifier_type: str
:param identifier: An <i>identifierType</i> value to use to identify the instrument, for example 'BBG000BLNNV0'. (required)
:type identifier: str
:param update_instrument_identifier_request: The identifier to update or delete. This need not be the same value as the 'identifier' parameter used to retrieve the instrument. (required)
:type update_instrument_identifier_request: UpdateInstrumentIdentifierRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without the HTTP status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If a single
number is provided, it will be the total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(Instrument, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'identifier_type',
'identifier',
'update_instrument_identifier_request'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method update_instrument_identifier" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'update_instrument_identifier_request' is set
if self.api_client.client_side_validation and ('update_instrument_identifier_request' not in local_var_params or # noqa: E501
local_var_params['update_instrument_identifier_request'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `update_instrument_identifier_request` when calling `update_instrument_identifier`") # noqa: E501
collection_formats = {}
path_params = {}
if 'identifier_type' in local_var_params:
path_params['identifierType'] = local_var_params['identifier_type'] # noqa: E501
if 'identifier' in local_var_params:
path_params['identifier'] = local_var_params['identifier'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'update_instrument_identifier_request' in local_var_params:
body_params = local_var_params['update_instrument_identifier_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json', 'text/json']) # noqa: E501
header_params['Accept-Encoding'] = "gzip, deflate, br"
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/json', 'text/json', 'application/*+json']) # noqa: E501
# set the LUSID header
header_params['X-LUSID-SDK-Language'] = 'Python'
header_params['X-LUSID-SDK-Version'] = '0.11.3648'
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
response_types_map = {
200: "Instrument",
400: "LusidValidationProblemDetails",
}
return self.api_client.call_api(
'/api/instruments/{identifierType}/{identifier}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
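# A minimal usage sketch for the update call above (assumptions: an
# authenticated ApiClient named `api_client` already exists, and the field
# names on UpdateInstrumentIdentifierRequest follow the LUSID Python models;
# the Figi/Isin values are illustrative):
#
#     instruments_api = InstrumentsApi(api_client)
#     updated = instruments_api.update_instrument_identifier(
#         identifier_type="Figi",
#         identifier="BBG000BLNNV0",
#         update_instrument_identifier_request=UpdateInstrumentIdentifierRequest(
#             type="Isin",
#             value="US0378331005",  # leave `value` unspecified to delete
#         ),
#     )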
def upsert_instruments(self, request_body, **kwargs): # noqa: E501
"""UpsertInstruments: Upsert instruments # noqa: E501
Create or update one or more instruments in the instrument master. An instrument is updated if it already exists and created if it does not. In the request, each instrument definition should be keyed by a unique correlation ID. This ID is ephemeral and not stored by LUSID. It serves only to easily identify each instrument in the response. Note that an instrument must have at least one unique identifier, which is a combination of a type (such as 'Figi') and a value (such as 'BBG000BS1N49'). In addition, a random value is automatically generated for a LUSID Instrument ID (LUID) unique type by the system. For more information, see https://support.lusid.com/knowledgebase/article/KA-01862. The response returns both the collection of successfully created or updated instruments, as well as those that failed. For each failure, a reason is provided. It is important to check the failed set for unsuccessful results. The maximum number of instruments that this method can upsert per request is 2,000. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.upsert_instruments(request_body, async_req=True)
>>> result = thread.get()
:param request_body: The definitions of the instruments to create or update. (required)
:type request_body: dict(str, InstrumentDefinition)
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If a single
number is provided, it is the total request timeout. It can also be a
pair (tuple) of (connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: UpsertInstrumentsResponse
"""
kwargs['_return_http_data_only'] = True
return self.upsert_instruments_with_http_info(request_body, **kwargs) # noqa: E501
def upsert_instruments_with_http_info(self, request_body, **kwargs): # noqa: E501
"""UpsertInstruments: Upsert instruments # noqa: E501
Create or update one or more instruments in the instrument master. An instrument is updated if it already exists and created if it does not. In the request, each instrument definition should be keyed by a unique correlation ID. This ID is ephemeral and not stored by LUSID. It serves only to easily identify each instrument in the response. Note that an instrument must have at least one unique identifier, which is a combination of a type (such as 'Figi') and a value (such as 'BBG000BS1N49'). In addition, a random value is automatically generated for a LUSID Instrument ID (LUID) unique type by the system. For more information, see https://support.lusid.com/knowledgebase/article/KA-01862. The response returns both the collection of successfully created or updated instruments, as well as those that failed. For each failure, a reason is provided. It is important to check the failed set for unsuccessful results. The maximum number of instruments that this method can upsert per request is 2,000. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.upsert_instruments_with_http_info(request_body, async_req=True)
>>> result = thread.get()
:param request_body: The definitions of the instruments to create or update. (required)
:type request_body: dict(str, InstrumentDefinition)
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: if True, return only the response data,
without the status code and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If a single
number is provided, it is the total request timeout. It can also be a
pair (tuple) of (connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication settings in the
spec for that request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(UpsertInstrumentsResponse, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'request_body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method upsert_instruments" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'request_body' is set
if self.api_client.client_side_validation and ('request_body' not in local_var_params or # noqa: E501
local_var_params['request_body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `request_body` when calling `upsert_instruments`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'request_body' in local_var_params:
body_params = local_var_params['request_body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json', 'text/json']) # noqa: E501
header_params['Accept-Encoding'] = "gzip, deflate, br"
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/json', 'text/json', 'application/*+json']) # noqa: E501
# set the LUSID header
header_params['X-LUSID-SDK-Language'] = 'Python'
header_params['X-LUSID-SDK-Version'] = '0.11.3648'
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
response_types_map = {
201: "UpsertInstrumentsResponse",
400: "LusidValidationProblemDetails",
}
return self.api_client.call_api(
'/api/instruments', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
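# A minimal usage sketch for upsert_instruments (assumptions: an authenticated
# ApiClient named `api_client`; InstrumentDefinition and InstrumentIdValue are
# the LUSID Python request models; the correlation ID "inst-1" is arbitrary
# and not stored by LUSID):
#
#     instruments_api = InstrumentsApi(api_client)
#     response = instruments_api.upsert_instruments(request_body={
#         "inst-1": InstrumentDefinition(
#             name="Example Equity",
#             identifiers={"Figi": InstrumentIdValue(value="BBG000BS1N49")},
#         ),
#     })
#     # The response contains both successes and failures; check both.
#     print(response.values, response.failed)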
def upsert_instruments_properties(self, upsert_instrument_property_request, **kwargs): # noqa: E501
"""UpsertInstrumentsProperties: Upsert instruments properties # noqa: E501
Create or update one or more properties for particular instruments. Each instrument property is updated if it exists and created if it does not. For any failures, a reason is provided. Properties have an <i>effectiveFrom</i> datetime from which the property is valid, and an <i>effectiveUntil</i> datetime until which the property is valid. Not supplying an <i>effectiveUntil</i> datetime results in the property being valid indefinitely, or until the next <i>effectiveFrom</i> datetime of the property. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.upsert_instruments_properties(upsert_instrument_property_request, async_req=True)
>>> result = thread.get()
:param upsert_instrument_property_request: A list of instruments and associated instrument properties to create or update. (required)
:type upsert_instrument_property_request: list[UpsertInstrumentPropertyRequest]
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If a single
number is provided, it is the total request timeout. It can also be a
pair (tuple) of (connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: UpsertInstrumentPropertiesResponse
"""
kwargs['_return_http_data_only'] = True
return self.upsert_instruments_properties_with_http_info(upsert_instrument_property_request, **kwargs) # noqa: E501
def upsert_instruments_properties_with_http_info(self, upsert_instrument_property_request, **kwargs): # noqa: E501
"""UpsertInstrumentsProperties: Upsert instruments properties # noqa: E501
Create or update one or more properties for particular instruments. Each instrument property is updated if it exists and created if it does not. For any failures, a reason is provided. Properties have an <i>effectiveFrom</i> datetime from which the property is valid, and an <i>effectiveUntil</i> datetime until which the property is valid. Not supplying an <i>effectiveUntil</i> datetime results in the property being valid indefinitely, or until the next <i>effectiveFrom</i> datetime of the property. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.upsert_instruments_properties_with_http_info(upsert_instrument_property_request, async_req=True)
>>> result = thread.get()
:param upsert_instrument_property_request: A list of instruments and associated instrument properties to create or update. (required)
:type upsert_instrument_property_request: list[UpsertInstrumentPropertyRequest]
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: if True, return only the response data,
without the status code and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If a single
number is provided, it is the total request timeout. It can also be a
pair (tuple) of (connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication settings in the
spec for that request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(UpsertInstrumentPropertiesResponse, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'upsert_instrument_property_request'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method upsert_instruments_properties" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'upsert_instrument_property_request' is set
if self.api_client.client_side_validation and ('upsert_instrument_property_request' not in local_var_params or # noqa: E501
local_var_params['upsert_instrument_property_request'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `upsert_instrument_property_request` when calling `upsert_instruments_properties`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'upsert_instrument_property_request' in local_var_params:
body_params = local_var_params['upsert_instrument_property_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json', 'text/json']) # noqa: E501
header_params['Accept-Encoding'] = "gzip, deflate, br"
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/json', 'text/json', 'application/*+json']) # noqa: E501
# set the LUSID header
header_params['X-LUSID-SDK-Language'] = 'Python'
header_params['X-LUSID-SDK-Version'] = '0.11.3648'
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
response_types_map = {
201: "UpsertInstrumentPropertiesResponse",
400: "LusidValidationProblemDetails",
}
return self.api_client.call_api(
'/api/instruments/$upsertproperties', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
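# A minimal usage sketch for upsert_instruments_properties (assumptions: an
# authenticated ApiClient named `api_client`; UpsertInstrumentPropertyRequest,
# ModelProperty and PropertyValue follow the LUSID Python models; the property
# key, scope and label value are illustrative):
#
#     instruments_api = InstrumentsApi(api_client)
#     response = instruments_api.upsert_instruments_properties(
#         upsert_instrument_property_request=[
#             UpsertInstrumentPropertyRequest(
#                 identifier_type="Figi",
#                 identifier="BBG000BLNNV0",
#                 properties=[ModelProperty(
#                     key="Instrument/MyScope/Sector",
#                     value=PropertyValue(label_value="Technology"),
#                 )],
#             )
#         ]
#     )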
| 56.16675
| 1,077
| 0.636066
| 12,960
| 111,828
| 5.280556
| 0.033102
| 0.034134
| 0.054825
| 0.018937
| 0.970717
| 0.966056
| 0.960766
| 0.953416
| 0.948477
| 0.94177
| 0
| 0.014031
| 0.292556
| 111,828
| 1,990
| 1,078
| 56.194975
| 0.851021
| 0.533373
| 0
| 0.75395
| 1
| 0.013544
| 0.23754
| 0.074332
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028217
| false
| 0
| 0.005643
| 0
| 0.062077
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
130096326d43717c47f92c410e0107717ab794cd
| 129
|
py
|
Python
|
kstore/helpers/themes.py
|
KeoH/django-keoh-kstore
|
825d7984a06823a4e592265c4e791b455ddbb481
|
[
"BSD-2-Clause"
] | null | null | null |
kstore/helpers/themes.py
|
KeoH/django-keoh-kstore
|
825d7984a06823a4e592265c4e791b455ddbb481
|
[
"BSD-2-Clause"
] | null | null | null |
kstore/helpers/themes.py
|
KeoH/django-keoh-kstore
|
825d7984a06823a4e592265c4e791b455ddbb481
|
[
"BSD-2-Clause"
] | null | null | null |
from kstore.models import BasicConfiguration

def get_theme_url():
    # objects.first() returns None when no BasicConfiguration row exists yet,
    # so guard against an unconfigured installation.
    config = BasicConfiguration.objects.first()
    return config.get_theme_url() if config else None
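# A minimal usage sketch (hypothetical: a Django context processor that
# exposes the configured theme URL to templates; `theme` and the context key
# are illustrative names, not part of this helper):
#
#     from kstore.helpers.themes import get_theme_url
#
#     def theme(request):
#         return {"KSTORE_THEME_URL": get_theme_url()}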
| 25.8
| 61
| 0.813953
| 16
| 129
| 6.3125
| 0.75
| 0.158416
| 0.217822
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.100775
| 129
| 4
| 62
| 32.25
| 0.87069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
1301ca39b141f5f1c334ed2c0cef42c9f096bee6
| 3,031
|
py
|
Python
|
tests/security/test_websocket.py
|
allieus/channels
|
9e0ec5f7e0aedc20e787137904fe12fb9ef63873
|
[
"BSD-3-Clause"
] | 1
|
2019-05-26T12:52:34.000Z
|
2019-05-26T12:52:34.000Z
|
tests/security/test_websocket.py
|
allieus/channels
|
9e0ec5f7e0aedc20e787137904fe12fb9ef63873
|
[
"BSD-3-Clause"
] | null | null | null |
tests/security/test_websocket.py
|
allieus/channels
|
9e0ec5f7e0aedc20e787137904fe12fb9ef63873
|
[
"BSD-3-Clause"
] | 1
|
2019-05-26T12:58:00.000Z
|
2019-05-26T12:58:00.000Z
|
import pytest
from channels.generic.websocket import AsyncWebsocketConsumer
from channels.security.websocket import OriginValidator
from channels.testing import WebsocketCommunicator
@pytest.mark.asyncio
async def test_origin_validator():
"""
Tests that OriginValidator correctly allows/denies connections.
"""
# Make our test application
application = OriginValidator(AsyncWebsocketConsumer, ["allowed-domain.com"])
# Test a normal connection
communicator = WebsocketCommunicator(application, "/", headers=[(b"origin", b"http://allowed-domain.com")])
connected, _ = await communicator.connect()
assert connected
await communicator.disconnect()
# Test a bad connection
communicator = WebsocketCommunicator(application, "/", headers=[(b"origin", b"http://bad-domain.com")])
connected, _ = await communicator.connect()
assert not connected
await communicator.disconnect()
# Make our test application, bad pattern
application = OriginValidator(AsyncWebsocketConsumer, ["*.allowed-domain.com"])
# Test a bad connection
communicator = WebsocketCommunicator(application, "/", headers=[(b"origin", b"http://allowed-domain.com")])
connected, _ = await communicator.connect()
assert not connected
await communicator.disconnect()
# Make our test application, good pattern
application = OriginValidator(AsyncWebsocketConsumer, [".allowed-domain.com"])
# Test a normal connection
communicator = WebsocketCommunicator(application, "/", headers=[(b"origin", b"http://www.allowed-domain.com")])
connected, _ = await communicator.connect()
assert connected
await communicator.disconnect()
# Make our test application, with scheme://domain[:port] for http
application = OriginValidator(AsyncWebsocketConsumer, ["http://allowed-domain.com"])
# Test a normal connection
communicator = WebsocketCommunicator(application, "/", headers=[(b"origin", b"http://allowed-domain.com")])
connected, _ = await communicator.connect()
assert connected
await communicator.disconnect()
# Test a bad connection
communicator = WebsocketCommunicator(application, "/", headers=[(b"origin", b"https://bad-domain.com:443")])
connected, _ = await communicator.connect()
assert not connected
await communicator.disconnect()
# Make our test application, with all hosts allowed
application = OriginValidator(AsyncWebsocketConsumer, ["*"])
# Test a connection without any headers
communicator = WebsocketCommunicator(application, "/", headers=[])
connected, _ = await communicator.connect()
assert connected
await communicator.disconnect()
# Make our test application, with no hosts allowed
application = OriginValidator(AsyncWebsocketConsumer, [])
# Test a connection without any headers
communicator = WebsocketCommunicator(application, "/", headers=[])
connected, _ = await communicator.connect()
assert not connected
await communicator.disconnect()
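# A minimal deployment sketch showing how OriginValidator is typically wired
# into an ASGI application (assumptions: `websocket_urlpatterns` is defined
# elsewhere in the project; the allowed-origin patterns are illustrative):
#
#     from channels.routing import ProtocolTypeRouter, URLRouter
#     from channels.security.websocket import OriginValidator
#
#     application = ProtocolTypeRouter({
#         "websocket": OriginValidator(
#             URLRouter(websocket_urlpatterns),
#             [".allowed-domain.com", "http://allowed-domain.com"],
#         ),
#     })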
| 46.630769
| 115
| 0.730782
| 295
| 3,031
| 7.474576
| 0.19661
| 0.101587
| 0.188662
| 0.185034
| 0.819048
| 0.819048
| 0.819048
| 0.819048
| 0.819048
| 0.781859
| 0
| 0.001178
| 0.160013
| 3,031
| 64
| 116
| 47.359375
| 0.864886
| 0.159683
| 0
| 0.659091
| 0
| 0
| 0.113331
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 1
| 0
| false
| 0
| 0.090909
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
131ee85c69e61936c5c274e8b62c3f0012827f29
| 1,020,644
|
py
|
Python
|
operators/seldon-operator/python/pulumi_pulumi_kubernetes_crds_operators_seldon_operator/machinelearning/v1/_inputs.py
|
pulumi/pulumi-kubernetes-crds
|
372c4c0182f6b899af82d6edaad521aa14f22150
|
[
"Apache-2.0"
] | null | null | null |
operators/seldon-operator/python/pulumi_pulumi_kubernetes_crds_operators_seldon_operator/machinelearning/v1/_inputs.py
|
pulumi/pulumi-kubernetes-crds
|
372c4c0182f6b899af82d6edaad521aa14f22150
|
[
"Apache-2.0"
] | 2
|
2020-09-18T17:12:23.000Z
|
2020-12-30T19:40:56.000Z
|
operators/seldon-operator/python/pulumi_pulumi_kubernetes_crds_operators_seldon_operator/machinelearning/v1/_inputs.py
|
pulumi/pulumi-kubernetes-crds
|
372c4c0182f6b899af82d6edaad521aa14f22150
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by crd2pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'SeldonDeploymentSpecArgs',
'SeldonDeploymentSpecPredictorsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsExternalArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsExternalMetricSelectorArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsExternalMetricSelectorMatchExpressionsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsExternalTargetAverageValueArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsExternalTargetValueArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectAverageValueArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectSelectorArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectSelectorMatchExpressionsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectTargetArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectTargetValueArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsPodsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsPodsSelectorArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsPodsSelectorMatchExpressionsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsPodsTargetAverageValueArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsResourceArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsResourceTargetAverageValueArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehaviorArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehaviorScaleDownArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehaviorScaleDownPoliciesArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehaviorScaleUpArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehaviorScaleUpPoliciesArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigResourceMetricsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigResourceMetricsTargetArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigResourceMetricsTargetAverageValueArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigResourceMetricsTargetValueArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecTriggersArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecTriggersAuthenticationRefArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsPdbSpecArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsPdbSpecMaxUnavailableArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsPdbSpecMinAvailableArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressionsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFieldsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressionsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFieldsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressionsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressionsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressionsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressionsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvFromArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvFromConfigMapRefArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvFromSecretRefArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvValueFromArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvValueFromConfigMapKeyRefArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvValueFromFieldRefArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvValueFromResourceFieldRefArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvValueFromResourceFieldRefDivisorArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvValueFromSecretKeyRefArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecycleArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartExecArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartHttpGetArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartHttpGetHttpHeadersArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartTcpSocketArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePreStopArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePreStopExecArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePreStopHttpGetArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePreStopHttpGetHttpHeadersArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePreStopTcpSocketArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLivenessProbeArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLivenessProbeExecArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLivenessProbeHttpGetArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLivenessProbeHttpGetHttpHeadersArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLivenessProbeTcpSocketArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersPortsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersReadinessProbeArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersReadinessProbeExecArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersReadinessProbeHttpGetArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersReadinessProbeHttpGetHttpHeadersArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersReadinessProbeTcpSocketArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersResourcesArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersResourcesLimitsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersResourcesRequestsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersSecurityContextArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersSecurityContextCapabilitiesArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersSecurityContextSeLinuxOptionsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersSecurityContextWindowsOptionsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeExecArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeHttpGetArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeHttpGetHttpHeadersArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeHttpGetPortArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeTcpSocketArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeTcpSocketPortArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersVolumeDevicesArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersVolumeMountsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecDnsConfigArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecDnsConfigOptionsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvFromArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvFromConfigMapRefArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvFromSecretRefArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvValueFromArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvValueFromConfigMapKeyRefArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvValueFromFieldRefArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvValueFromResourceFieldRefArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvValueFromResourceFieldRefDivisorArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvValueFromSecretKeyRefArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecycleArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartExecArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartHttpGetArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartHttpGetHttpHeadersArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartHttpGetPortArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartTcpSocketArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartTcpSocketPortArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopExecArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopHttpGetArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopHttpGetHttpHeadersArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopHttpGetPortArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopTcpSocketArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopTcpSocketPortArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeExecArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeHttpGetArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeHttpGetHttpHeadersArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeHttpGetPortArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeTcpSocketArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeTcpSocketPortArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersPortsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeExecArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeHttpGetArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeHttpGetHttpHeadersArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeHttpGetPortArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeTcpSocketArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeTcpSocketPortArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersResourcesArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersResourcesLimitsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersResourcesRequestsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersSecurityContextArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersSecurityContextCapabilitiesArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersSecurityContextSeLinuxOptionsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersSecurityContextWindowsOptionsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeExecArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeHttpGetArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeHttpGetHttpHeadersArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeHttpGetPortArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeTcpSocketArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeTcpSocketPortArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersVolumeDevicesArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersVolumeMountsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecHostAliasesArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecImagePullSecretsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvFromArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvFromConfigMapRefArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvFromSecretRefArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromConfigMapKeyRefArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromFieldRefArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromResourceFieldRefArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromResourceFieldRefDivisorArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromSecretKeyRefArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecycleArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePostStartArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePostStartExecArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePostStartHttpGetArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePostStartHttpGetHttpHeadersArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePostStartTcpSocketArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePreStopArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePreStopExecArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePreStopHttpGetArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePreStopHttpGetHttpHeadersArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePreStopTcpSocketArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLivenessProbeArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLivenessProbeExecArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLivenessProbeHttpGetArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLivenessProbeHttpGetHttpHeadersArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLivenessProbeTcpSocketArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersPortsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersReadinessProbeArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersReadinessProbeExecArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersReadinessProbeHttpGetArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersReadinessProbeHttpGetHttpHeadersArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersReadinessProbeTcpSocketArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersResourcesArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersResourcesLimitsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersResourcesRequestsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersSecurityContextArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersSecurityContextCapabilitiesArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersSecurityContextSeLinuxOptionsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersSecurityContextWindowsOptionsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeExecArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeHttpGetArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeHttpGetHttpHeadersArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeHttpGetPortArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeTcpSocketArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeTcpSocketPortArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersVolumeDevicesArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersVolumeMountsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecOverheadArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecReadinessGatesArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecSecurityContextArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecSecurityContextSeLinuxOptionsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecSecurityContextSysctlsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecSecurityContextWindowsOptionsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecTolerationsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecTopologySpreadConstraintsArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecTopologySpreadConstraintsLabelSelectorArgs',
'SeldonDeploymentSpecPredictorsComponentSpecsSpecTopologySpreadConstraintsLabelSelectorMatchExpressionsArgs',
'SeldonDeploymentSpecPredictorsEngineResourcesArgs',
'SeldonDeploymentSpecPredictorsEngineResourcesLimitsArgs',
'SeldonDeploymentSpecPredictorsEngineResourcesRequestsArgs',
'SeldonDeploymentSpecPredictorsExplainerArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvFromArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvFromConfigMapRefArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvFromSecretRefArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromConfigMapKeyRefArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromFieldRefArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromResourceFieldRefArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromResourceFieldRefDivisorArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromSecretKeyRefArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecycleArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePostStartArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePostStartExecArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePostStartHttpGetArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePostStartHttpGetHttpHeadersArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePostStartTcpSocketArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePreStopArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePreStopExecArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePreStopHttpGetArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePreStopHttpGetHttpHeadersArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePreStopTcpSocketArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecLivenessProbeArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecLivenessProbeExecArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecLivenessProbeHttpGetArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecLivenessProbeHttpGetHttpHeadersArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecLivenessProbeTcpSocketArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecPortsArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecReadinessProbeArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecReadinessProbeExecArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecReadinessProbeHttpGetArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecReadinessProbeHttpGetHttpHeadersArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecReadinessProbeTcpSocketArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecResourcesArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecResourcesLimitsArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecResourcesRequestsArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecSecurityContextArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecSecurityContextCapabilitiesArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecSecurityContextSeLinuxOptionsArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecSecurityContextWindowsOptionsArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeExecArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeHttpGetArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeHttpGetHttpHeadersArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeHttpGetPortArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeTcpSocketArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeTcpSocketPortArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecVolumeDevicesArgs',
'SeldonDeploymentSpecPredictorsExplainerContainerSpecVolumeMountsArgs',
'SeldonDeploymentSpecPredictorsExplainerEndpointArgs',
'SeldonDeploymentSpecPredictorsGraphArgs',
'SeldonDeploymentSpecPredictorsGraphChildrenArgs',
'SeldonDeploymentSpecPredictorsGraphChildrenChildrenArgs',
'SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenArgs',
'SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenChildrenArgs',
'SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenChildrenEndpointArgs',
'SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenChildrenLoggerArgs',
'SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenChildrenParametersArgs',
'SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenEndpointArgs',
'SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenLoggerArgs',
'SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenParametersArgs',
'SeldonDeploymentSpecPredictorsGraphChildrenChildrenEndpointArgs',
'SeldonDeploymentSpecPredictorsGraphChildrenChildrenLoggerArgs',
'SeldonDeploymentSpecPredictorsGraphChildrenChildrenParametersArgs',
'SeldonDeploymentSpecPredictorsGraphChildrenEndpointArgs',
'SeldonDeploymentSpecPredictorsGraphChildrenLoggerArgs',
'SeldonDeploymentSpecPredictorsGraphChildrenParametersArgs',
'SeldonDeploymentSpecPredictorsGraphEndpointArgs',
'SeldonDeploymentSpecPredictorsGraphLoggerArgs',
'SeldonDeploymentSpecPredictorsGraphParametersArgs',
'SeldonDeploymentSpecPredictorsSslArgs',
'SeldonDeploymentSpecPredictorsSvcOrchSpecArgs',
'SeldonDeploymentSpecPredictorsSvcOrchSpecEnvArgs',
'SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromArgs',
'SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromConfigMapKeyRefArgs',
'SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromFieldRefArgs',
'SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromResourceFieldRefArgs',
'SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromResourceFieldRefDivisorArgs',
'SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromSecretKeyRefArgs',
'SeldonDeploymentSpecPredictorsSvcOrchSpecResourcesArgs',
'SeldonDeploymentSpecPredictorsSvcOrchSpecResourcesLimitsArgs',
'SeldonDeploymentSpecPredictorsSvcOrchSpecResourcesRequestsArgs',
'SeldonDeploymentStatusArgs',
'SeldonDeploymentStatusAddressArgs',
'SeldonDeploymentStatusDeploymentStatusArgs',
'SeldonDeploymentStatusServiceStatusArgs',
]
@pulumi.input_type
class SeldonDeploymentSpecArgs:
def __init__(__self__, *,
predictors: pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsArgs']]],
annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
oauth_key: Optional[pulumi.Input[str]] = None,
oauth_secret: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input[str]] = None,
replicas: Optional[pulumi.Input[int]] = None,
server_type: Optional[pulumi.Input[str]] = None,
transport: Optional[pulumi.Input[str]] = None):
"""
SeldonDeploymentSpec defines the desired state of SeldonDeployment
:param pulumi.Input[str] name: Name is deprecated and will be removed in a future release
"""
pulumi.set(__self__, "predictors", predictors)
if annotations is not None:
pulumi.set(__self__, "annotations", annotations)
if name is not None:
pulumi.set(__self__, "name", name)
if oauth_key is not None:
pulumi.set(__self__, "oauth_key", oauth_key)
if oauth_secret is not None:
pulumi.set(__self__, "oauth_secret", oauth_secret)
if protocol is not None:
pulumi.set(__self__, "protocol", protocol)
if replicas is not None:
pulumi.set(__self__, "replicas", replicas)
if server_type is not None:
pulumi.set(__self__, "server_type", server_type)
if transport is not None:
pulumi.set(__self__, "transport", transport)
@property
@pulumi.getter
def predictors(self) -> pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsArgs']]]:
return pulumi.get(self, "predictors")
@predictors.setter
def predictors(self, value: pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsArgs']]]):
pulumi.set(self, "predictors", value)
@property
@pulumi.getter
def annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
return pulumi.get(self, "annotations")
@annotations.setter
def annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "annotations", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name is deprecated and will be removed in a future release
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def oauth_key(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "oauth_key")
@oauth_key.setter
def oauth_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "oauth_key", value)
@property
@pulumi.getter
def oauth_secret(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "oauth_secret")
@oauth_secret.setter
def oauth_secret(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "oauth_secret", value)
@property
@pulumi.getter
def protocol(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "protocol")
@protocol.setter
def protocol(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "protocol", value)
@property
@pulumi.getter
def replicas(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "replicas")
@replicas.setter
def replicas(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "replicas", value)
@property
@pulumi.getter(name="serverType")
def server_type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "server_type")
@server_type.setter
def server_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "server_type", value)
@property
@pulumi.getter
def transport(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "transport")
@transport.setter
def transport(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "transport", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsArgs:
def __init__(__self__, *,
graph: pulumi.Input['SeldonDeploymentSpecPredictorsGraphArgs'],
name: pulumi.Input[str],
annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
component_specs: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsArgs']]]] = None,
engine_resources: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsEngineResourcesArgs']] = None,
explainer: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerArgs']] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
replicas: Optional[pulumi.Input[int]] = None,
shadow: Optional[pulumi.Input[bool]] = None,
ssl: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsSslArgs']] = None,
svc_orch_spec: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecArgs']] = None,
traffic: Optional[pulumi.Input[int]] = None):
"""
:param pulumi.Input['SeldonDeploymentSpecPredictorsEngineResourcesArgs'] engine_resources: ResourceRequirements describes the compute resource requirements.
"""
pulumi.set(__self__, "graph", graph)
pulumi.set(__self__, "name", name)
if annotations is not None:
pulumi.set(__self__, "annotations", annotations)
if component_specs is not None:
pulumi.set(__self__, "component_specs", component_specs)
if engine_resources is not None:
pulumi.set(__self__, "engine_resources", engine_resources)
if explainer is not None:
pulumi.set(__self__, "explainer", explainer)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if replicas is not None:
pulumi.set(__self__, "replicas", replicas)
if shadow is not None:
pulumi.set(__self__, "shadow", shadow)
if ssl is not None:
pulumi.set(__self__, "ssl", ssl)
if svc_orch_spec is not None:
pulumi.set(__self__, "svc_orch_spec", svc_orch_spec)
if traffic is not None:
pulumi.set(__self__, "traffic", traffic)
@property
@pulumi.getter
def graph(self) -> pulumi.Input['SeldonDeploymentSpecPredictorsGraphArgs']:
return pulumi.get(self, "graph")
@graph.setter
def graph(self, value: pulumi.Input['SeldonDeploymentSpecPredictorsGraphArgs']):
pulumi.set(self, "graph", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
return pulumi.get(self, "annotations")
@annotations.setter
def annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "annotations", value)
@property
@pulumi.getter(name="componentSpecs")
def component_specs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsArgs']]]]:
return pulumi.get(self, "component_specs")
@component_specs.setter
def component_specs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsArgs']]]]):
pulumi.set(self, "component_specs", value)
@property
@pulumi.getter(name="engineResources")
def engine_resources(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsEngineResourcesArgs']]:
"""
ResourceRequirements describes the compute resource requirements.
"""
return pulumi.get(self, "engine_resources")
@engine_resources.setter
def engine_resources(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsEngineResourcesArgs']]):
pulumi.set(self, "engine_resources", value)
@property
@pulumi.getter
def explainer(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerArgs']]:
return pulumi.get(self, "explainer")
@explainer.setter
def explainer(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerArgs']]):
pulumi.set(self, "explainer", value)
@property
@pulumi.getter
def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
return pulumi.get(self, "labels")
@labels.setter
def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "labels", value)
@property
@pulumi.getter
def replicas(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "replicas")
@replicas.setter
def replicas(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "replicas", value)
@property
@pulumi.getter
def shadow(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "shadow")
@shadow.setter
def shadow(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "shadow", value)
@property
@pulumi.getter
def ssl(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsSslArgs']]:
return pulumi.get(self, "ssl")
@ssl.setter
def ssl(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsSslArgs']]):
pulumi.set(self, "ssl", value)
@property
@pulumi.getter(name="svcOrchSpec")
def svc_orch_spec(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecArgs']]:
return pulumi.get(self, "svc_orch_spec")
@svc_orch_spec.setter
def svc_orch_spec(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecArgs']]):
pulumi.set(self, "svc_orch_spec", value)
@property
@pulumi.getter
def traffic(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "traffic")
@traffic.setter
def traffic(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "traffic", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsArgs:
def __init__(__self__, *,
hpa_spec: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecArgs']] = None,
keda_spec: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecArgs']] = None,
metadata: Optional[pulumi.Input[Mapping[str, Any]]] = None,
pdb_spec: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsPdbSpecArgs']] = None,
replicas: Optional[pulumi.Input[int]] = None,
spec: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecArgs']] = None):
"""
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecArgs'] keda_spec: SeldonScaledObjectSpec is the spec for a KEDA ScaledObject resource.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecArgs'] spec: PodSpec is a description of a pod.
"""
if hpa_spec is not None:
pulumi.set(__self__, "hpa_spec", hpa_spec)
if keda_spec is not None:
pulumi.set(__self__, "keda_spec", keda_spec)
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if pdb_spec is not None:
pulumi.set(__self__, "pdb_spec", pdb_spec)
if replicas is not None:
pulumi.set(__self__, "replicas", replicas)
if spec is not None:
pulumi.set(__self__, "spec", spec)
@property
@pulumi.getter(name="hpaSpec")
def hpa_spec(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecArgs']]:
return pulumi.get(self, "hpa_spec")
@hpa_spec.setter
def hpa_spec(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecArgs']]):
pulumi.set(self, "hpa_spec", value)
@property
@pulumi.getter(name="kedaSpec")
def keda_spec(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecArgs']]:
"""
SeldonScaledObjectSpec is the spec for a KEDA ScaledObject resource.
"""
return pulumi.get(self, "keda_spec")
@keda_spec.setter
def keda_spec(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecArgs']]):
pulumi.set(self, "keda_spec", value)
@property
@pulumi.getter
def metadata(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
return pulumi.get(self, "metadata")
@metadata.setter
def metadata(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "metadata", value)
@property
@pulumi.getter(name="pdbSpec")
def pdb_spec(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsPdbSpecArgs']]:
return pulumi.get(self, "pdb_spec")
@pdb_spec.setter
def pdb_spec(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsPdbSpecArgs']]):
pulumi.set(self, "pdb_spec", value)
@property
@pulumi.getter
def replicas(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "replicas")
@replicas.setter
def replicas(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "replicas", value)
@property
@pulumi.getter
def spec(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecArgs']]:
"""
PodSpec is a description of a pod.
"""
return pulumi.get(self, "spec")
@spec.setter
def spec(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecArgs']]):
pulumi.set(self, "spec", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecArgs:
def __init__(__self__, *,
max_replicas: pulumi.Input[int],
metrics: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsArgs']]]] = None,
min_replicas: Optional[pulumi.Input[int]] = None):
pulumi.set(__self__, "max_replicas", max_replicas)
if metrics is not None:
pulumi.set(__self__, "metrics", metrics)
if min_replicas is not None:
pulumi.set(__self__, "min_replicas", min_replicas)
@property
@pulumi.getter(name="maxReplicas")
def max_replicas(self) -> pulumi.Input[int]:
return pulumi.get(self, "max_replicas")
@max_replicas.setter
def max_replicas(self, value: pulumi.Input[int]):
pulumi.set(self, "max_replicas", value)
@property
@pulumi.getter
def metrics(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsArgs']]]]:
return pulumi.get(self, "metrics")
@metrics.setter
def metrics(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsArgs']]]]):
pulumi.set(self, "metrics", value)
@property
@pulumi.getter(name="minReplicas")
def min_replicas(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "min_replicas")
@min_replicas.setter
def min_replicas(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "min_replicas", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsArgs:
def __init__(__self__, *,
type: pulumi.Input[str],
external: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsExternalArgs']] = None,
object: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectArgs']] = None,
pods: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsPodsArgs']] = None,
resource: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsResourceArgs']] = None):
"""
MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).
:param pulumi.Input[str] type: type is the type of metric source. It should be one of "Object", "Pods" or "Resource", each mapping to a matching field in the object.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsExternalArgs'] external: external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of the cluster (for example, the length of a queue in a cloud messaging service, or QPS from a load balancer running outside of the cluster).
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectArgs'] object: object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsPodsArgs'] pods: pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsResourceArgs'] resource: resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built into Kubernetes and have special scaling options on top of those available to normal per-pod metrics using the "pods" source.
"""
pulumi.set(__self__, "type", type)
if external is not None:
pulumi.set(__self__, "external", external)
if object is not None:
pulumi.set(__self__, "object", object)
if pods is not None:
pulumi.set(__self__, "pods", pods)
if resource is not None:
pulumi.set(__self__, "resource", resource)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
type is the type of metric source. It should be one of "Object", "Pods" or "Resource", each mapping to a matching field in the object.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def external(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsExternalArgs']]:
"""
external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of the cluster (for example, the length of a queue in a cloud messaging service, or QPS from a load balancer running outside of the cluster).
"""
return pulumi.get(self, "external")
@external.setter
def external(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsExternalArgs']]):
pulumi.set(self, "external", value)
@property
@pulumi.getter
def object(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectArgs']]:
"""
object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).
"""
return pulumi.get(self, "object")
@object.setter
def object(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectArgs']]):
pulumi.set(self, "object", value)
@property
@pulumi.getter
def pods(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsPodsArgs']]:
"""
pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.
"""
return pulumi.get(self, "pods")
@pods.setter
def pods(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsPodsArgs']]):
pulumi.set(self, "pods", value)
@property
@pulumi.getter
def resource(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsResourceArgs']]:
"""
resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built into Kubernetes and have special scaling options on top of those available to normal per-pod metrics using the "pods" source.
"""
return pulumi.get(self, "resource")
@resource.setter
def resource(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsResourceArgs']]):
pulumi.set(self, "resource", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsExternalArgs:
def __init__(__self__, *,
metric_name: pulumi.Input[str],
metric_selector: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsExternalMetricSelectorArgs']] = None,
target_average_value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsExternalTargetAverageValueArgs']] = None,
target_value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsExternalTargetValueArgs']] = None):
"""
external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of the cluster (for example, the length of a queue in a cloud messaging service, or QPS from a load balancer running outside of the cluster).
:param pulumi.Input[str] metric_name: metricName is the name of the metric in question.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsExternalMetricSelectorArgs'] metric_selector: metricSelector is used to identify a specific time series within a given metric.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsExternalTargetAverageValueArgs'] target_average_value: targetAverageValue is the target per-pod value of global metric (as a quantity). Mutually exclusive with TargetValue.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsExternalTargetValueArgs'] target_value: targetValue is the target value of the metric (as a quantity). Mutually exclusive with TargetAverageValue.
"""
pulumi.set(__self__, "metric_name", metric_name)
if metric_selector is not None:
pulumi.set(__self__, "metric_selector", metric_selector)
if target_average_value is not None:
pulumi.set(__self__, "target_average_value", target_average_value)
if target_value is not None:
pulumi.set(__self__, "target_value", target_value)
@property
@pulumi.getter(name="metricName")
def metric_name(self) -> pulumi.Input[str]:
"""
metricName is the name of the metric in question.
"""
return pulumi.get(self, "metric_name")
@metric_name.setter
def metric_name(self, value: pulumi.Input[str]):
pulumi.set(self, "metric_name", value)
@property
@pulumi.getter(name="metricSelector")
def metric_selector(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsExternalMetricSelectorArgs']]:
"""
metricSelector is used to identify a specific time series within a given metric.
"""
return pulumi.get(self, "metric_selector")
@metric_selector.setter
def metric_selector(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsExternalMetricSelectorArgs']]):
pulumi.set(self, "metric_selector", value)
@property
@pulumi.getter(name="targetAverageValue")
def target_average_value(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsExternalTargetAverageValueArgs']]:
"""
targetAverageValue is the target per-pod value of global metric (as a quantity). Mutually exclusive with TargetValue.
"""
return pulumi.get(self, "target_average_value")
@target_average_value.setter
def target_average_value(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsExternalTargetAverageValueArgs']]):
pulumi.set(self, "target_average_value", value)
@property
@pulumi.getter(name="targetValue")
def target_value(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsExternalTargetValueArgs']]:
"""
targetValue is the target value of the metric (as a quantity). Mutually exclusive with TargetAverageValue.
"""
return pulumi.get(self, "target_value")
@target_value.setter
def target_value(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsExternalTargetValueArgs']]):
pulumi.set(self, "target_value", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsExternalMetricSelectorArgs:
def __init__(__self__, *,
match_expressions: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsExternalMetricSelectorMatchExpressionsArgs']]]] = None,
match_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
metricSelector is used to identify a specific time series within a given metric.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsExternalMetricSelectorMatchExpressionsArgs']]] match_expressions: matchExpressions is a list of label selector requirements. The requirements are ANDed.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
if match_expressions is not None:
pulumi.set(__self__, "match_expressions", match_expressions)
if match_labels is not None:
pulumi.set(__self__, "match_labels", match_labels)
@property
@pulumi.getter(name="matchExpressions")
def match_expressions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsExternalMetricSelectorMatchExpressionsArgs']]]]:
"""
matchExpressions is a list of label selector requirements. The requirements are ANDed.
"""
return pulumi.get(self, "match_expressions")
@match_expressions.setter
def match_expressions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsExternalMetricSelectorMatchExpressionsArgs']]]]):
pulumi.set(self, "match_expressions", value)
@property
@pulumi.getter(name="matchLabels")
def match_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
return pulumi.get(self, "match_labels")
@match_labels.setter
def match_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "match_labels", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsExternalMetricSelectorMatchExpressionsArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
operator: pulumi.Input[str],
values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
:param pulumi.Input[str] key: key is the label key that the selector applies to.
:param pulumi.Input[str] operator: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
:param pulumi.Input[Sequence[pulumi.Input[str]]] values: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "operator", operator)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
key is the label key that the selector applies to.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def operator(self) -> pulumi.Input[str]:
"""
operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: pulumi.Input[str]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
return pulumi.get(self, "values")
@values.setter
def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "values", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsExternalTargetAverageValueArgs:
def __init__(__self__):
pass
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsExternalTargetValueArgs:
def __init__(__self__):
pass
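# A minimal sketch tying the pieces together: an external metric source wrapped
# in a MetricSpec. The metric name and selector labels are illustrative; note
# that target_value and target_average_value are mutually exclusive, and that
# the TargetValue/TargetAverageValue input types above are empty placeholders
# (the quantity itself is not modeled by this schema).
def _example_external_metric():
    return SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsArgs(
        type="External",  # assumed to pair with the populated `external` field
        external=SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsExternalArgs(
            metric_name="queue_length",
            metric_selector=SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsExternalMetricSelectorArgs(
                match_labels={"queue": "inference"},
            ),
            target_value=SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsExternalTargetValueArgs(),
        ),
    )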
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectArgs:
def __init__(__self__, *,
metric_name: pulumi.Input[str],
target: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectTargetArgs'],
target_value: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectTargetValueArgs'],
average_value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectAverageValueArgs']] = None,
selector: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectSelectorArgs']] = None):
"""
object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).
:param pulumi.Input[str] metric_name: metricName is the name of the metric in question.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectTargetArgs'] target: target is the described Kubernetes object.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectTargetValueArgs'] target_value: targetValue is the target value of the metric (as a quantity).
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectAverageValueArgs'] average_value: averageValue is the target value of the average of the metric across all relevant pods (as a quantity).
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectSelectorArgs'] selector: selector is the string-encoded form of a standard Kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
"""
pulumi.set(__self__, "metric_name", metric_name)
pulumi.set(__self__, "target", target)
pulumi.set(__self__, "target_value", target_value)
if average_value is not None:
pulumi.set(__self__, "average_value", average_value)
if selector is not None:
pulumi.set(__self__, "selector", selector)
@property
@pulumi.getter(name="metricName")
def metric_name(self) -> pulumi.Input[str]:
"""
metricName is the name of the metric in question.
"""
return pulumi.get(self, "metric_name")
@metric_name.setter
def metric_name(self, value: pulumi.Input[str]):
pulumi.set(self, "metric_name", value)
@property
@pulumi.getter
def target(self) -> pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectTargetArgs']:
"""
target is the described Kubernetes object.
"""
return pulumi.get(self, "target")
@target.setter
def target(self, value: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectTargetArgs']):
pulumi.set(self, "target", value)
@property
@pulumi.getter(name="targetValue")
def target_value(self) -> pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectTargetValueArgs']:
"""
targetValue is the target value of the metric (as a quantity).
"""
return pulumi.get(self, "target_value")
@target_value.setter
def target_value(self, value: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectTargetValueArgs']):
pulumi.set(self, "target_value", value)
@property
@pulumi.getter(name="averageValue")
def average_value(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectAverageValueArgs']]:
"""
averageValue is the target value of the average of the metric across all relevant pods (as a quantity).
"""
return pulumi.get(self, "average_value")
@average_value.setter
def average_value(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectAverageValueArgs']]):
pulumi.set(self, "average_value", value)
@property
@pulumi.getter
def selector(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectSelectorArgs']]:
"""
selector is the string-encoded form of a standard Kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
"""
return pulumi.get(self, "selector")
@selector.setter
def selector(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectSelectorArgs']]):
pulumi.set(self, "selector", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectAverageValueArgs:
def __init__(__self__):
pass
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectSelectorArgs:
def __init__(__self__, *,
match_expressions: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectSelectorMatchExpressionsArgs']]]] = None,
match_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
selector is the string-encoded form of a standard Kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectSelectorMatchExpressionsArgs']]] match_expressions: matchExpressions is a list of label selector requirements. The requirements are ANDed.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
if match_expressions is not None:
pulumi.set(__self__, "match_expressions", match_expressions)
if match_labels is not None:
pulumi.set(__self__, "match_labels", match_labels)
@property
@pulumi.getter(name="matchExpressions")
def match_expressions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectSelectorMatchExpressionsArgs']]]]:
"""
matchExpressions is a list of label selector requirements. The requirements are ANDed.
"""
return pulumi.get(self, "match_expressions")
@match_expressions.setter
def match_expressions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectSelectorMatchExpressionsArgs']]]]):
pulumi.set(self, "match_expressions", value)
@property
@pulumi.getter(name="matchLabels")
def match_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
return pulumi.get(self, "match_labels")
@match_labels.setter
def match_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "match_labels", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectSelectorMatchExpressionsArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
operator: pulumi.Input[str],
values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
:param pulumi.Input[str] key: key is the label key that the selector applies to.
:param pulumi.Input[str] operator: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
:param pulumi.Input[Sequence[pulumi.Input[str]]] values: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "operator", operator)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
key is the label key that the selector applies to.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def operator(self) -> pulumi.Input[str]:
"""
operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: pulumi.Input[str]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
return pulumi.get(self, "values")
@values.setter
def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "values", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectTargetArgs:
def __init__(__self__, *,
kind: pulumi.Input[str],
name: pulumi.Input[str],
api_version: Optional[pulumi.Input[str]] = None):
"""
target is the described Kubernetes object.
:param pulumi.Input[str] kind: Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
:param pulumi.Input[str] name: Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names
:param pulumi.Input[str] api_version: API version of the referent.
"""
pulumi.set(__self__, "kind", kind)
pulumi.set(__self__, "name", name)
if api_version is not None:
pulumi.set(__self__, "api_version", api_version)
@property
@pulumi.getter
def kind(self) -> pulumi.Input[str]:
"""
Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: pulumi.Input[str]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> Optional[pulumi.Input[str]]:
"""
API version of the referent.
"""
return pulumi.get(self, "api_version")
@api_version.setter
def api_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_version", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectTargetValueArgs:
def __init__(__self__):
pass
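# A minimal sketch of an object metric: scaling on hits-per-second reported for
# a single Kubernetes object. Kind/name/apiVersion values are illustrative.
def _example_object_metric():
    return SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectArgs(
        metric_name="hits_per_second",
        target=SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectTargetArgs(
            kind="Ingress",
            name="main-route",
            api_version="networking.k8s.io/v1",
        ),
        # Empty placeholder type; the quantity itself is not modeled by this schema.
        target_value=SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsObjectTargetValueArgs(),
    )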
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsPodsArgs:
def __init__(__self__, *,
metric_name: pulumi.Input[str],
target_average_value: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsPodsTargetAverageValueArgs'],
selector: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsPodsSelectorArgs']] = None):
"""
pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.
:param pulumi.Input[str] metric_name: metricName is the name of the metric in question.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsPodsTargetAverageValueArgs'] target_average_value: targetAverageValue is the target value of the average of the metric across all relevant pods (as a quantity).
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsPodsSelectorArgs'] selector: selector is the string-encoded form of a standard Kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
"""
pulumi.set(__self__, "metric_name", metric_name)
pulumi.set(__self__, "target_average_value", target_average_value)
if selector is not None:
pulumi.set(__self__, "selector", selector)
@property
@pulumi.getter(name="metricName")
def metric_name(self) -> pulumi.Input[str]:
"""
metricName is the name of the metric in question.
"""
return pulumi.get(self, "metric_name")
@metric_name.setter
def metric_name(self, value: pulumi.Input[str]):
pulumi.set(self, "metric_name", value)
@property
@pulumi.getter(name="targetAverageValue")
def target_average_value(self) -> pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsPodsTargetAverageValueArgs']:
"""
targetAverageValue is the target value of the average of the metric across all relevant pods (as a quantity).
"""
return pulumi.get(self, "target_average_value")
@target_average_value.setter
def target_average_value(self, value: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsPodsTargetAverageValueArgs']):
pulumi.set(self, "target_average_value", value)
@property
@pulumi.getter
def selector(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsPodsSelectorArgs']]:
"""
selector is the string-encoded form of a standard Kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
"""
return pulumi.get(self, "selector")
@selector.setter
def selector(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsPodsSelectorArgs']]):
pulumi.set(self, "selector", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsPodsSelectorArgs:
def __init__(__self__, *,
match_expressions: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsPodsSelectorMatchExpressionsArgs']]]] = None,
match_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
selector is the string-encoded form of a standard Kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsPodsSelectorMatchExpressionsArgs']]] match_expressions: matchExpressions is a list of label selector requirements. The requirements are ANDed.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
if match_expressions is not None:
pulumi.set(__self__, "match_expressions", match_expressions)
if match_labels is not None:
pulumi.set(__self__, "match_labels", match_labels)
@property
@pulumi.getter(name="matchExpressions")
def match_expressions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsPodsSelectorMatchExpressionsArgs']]]]:
"""
matchExpressions is a list of label selector requirements. The requirements are ANDed.
"""
return pulumi.get(self, "match_expressions")
@match_expressions.setter
def match_expressions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsPodsSelectorMatchExpressionsArgs']]]]):
pulumi.set(self, "match_expressions", value)
@property
@pulumi.getter(name="matchLabels")
def match_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
return pulumi.get(self, "match_labels")
@match_labels.setter
def match_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "match_labels", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsPodsSelectorMatchExpressionsArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
operator: pulumi.Input[str],
values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
:param pulumi.Input[str] key: key is the label key that the selector applies to.
:param pulumi.Input[str] operator: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
:param pulumi.Input[Sequence[pulumi.Input[str]]] values: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "operator", operator)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
key is the label key that the selector applies to.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def operator(self) -> pulumi.Input[str]:
"""
operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: pulumi.Input[str]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
return pulumi.get(self, "values")
@values.setter
def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "values", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsPodsTargetAverageValueArgs:
def __init__(__self__):
pass
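# A minimal sketch of a pods metric: the metric is read from every pod in the
# scale target and averaged before comparison. The metric name is illustrative.
def _example_pods_metric():
    return SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsPodsArgs(
        metric_name="transactions_processed_per_second",
        # Empty placeholder type; the target quantity is not modeled by this schema.
        target_average_value=SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsPodsTargetAverageValueArgs(),
    )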
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsResourceArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
target_average_utilization: Optional[pulumi.Input[int]] = None,
target_average_value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsResourceTargetAverageValueArgs']] = None):
"""
resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built into Kubernetes and have special scaling options on top of those available to normal per-pod metrics using the "pods" source.
:param pulumi.Input[str] name: name is the name of the resource in question.
:param pulumi.Input[int] target_average_utilization: targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsResourceTargetAverageValueArgs'] target_average_value: targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the "pods" metric source type.
"""
pulumi.set(__self__, "name", name)
if target_average_utilization is not None:
pulumi.set(__self__, "target_average_utilization", target_average_utilization)
if target_average_value is not None:
pulumi.set(__self__, "target_average_value", target_average_value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
name is the name of the resource in question.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="targetAverageUtilization")
def target_average_utilization(self) -> Optional[pulumi.Input[int]]:
"""
targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.
"""
return pulumi.get(self, "target_average_utilization")
@target_average_utilization.setter
def target_average_utilization(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "target_average_utilization", value)
@property
@pulumi.getter(name="targetAverageValue")
def target_average_value(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsResourceTargetAverageValueArgs']]:
"""
targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the "pods" metric source type.
"""
return pulumi.get(self, "target_average_value")
@target_average_value.setter
def target_average_value(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsResourceTargetAverageValueArgs']]):
pulumi.set(self, "target_average_value", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsResourceTargetAverageValueArgs:
def __init__(__self__):
pass
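# A minimal sketch of the most common case: a resource metric driving a full
# HPA spec, targeting 70% average CPU utilization between 1 and 5 replicas.
# The numbers are illustrative, not defaults taken from the schema.
def _example_cpu_hpa_spec():
    cpu = SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsResourceArgs(
        name="cpu",
        target_average_utilization=70,  # percent of the pods' requested CPU
    )
    return SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecArgs(
        max_replicas=5,
        min_replicas=1,
        metrics=[
            SeldonDeploymentSpecPredictorsComponentSpecsHpaSpecMetricsArgs(
                type="Resource",
                resource=cpu,
            ),
        ],
    )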
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecArgs:
def __init__(__self__, *,
triggers: pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecTriggersArgs']]],
advanced: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedArgs']] = None,
cooldown_period: Optional[pulumi.Input[int]] = None,
max_replica_count: Optional[pulumi.Input[int]] = None,
min_replica_count: Optional[pulumi.Input[int]] = None,
polling_interval: Optional[pulumi.Input[int]] = None):
"""
SeldonScaledObjectSpec is the spec for a KEDA ScaledObject resource.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedArgs'] advanced: AdvancedConfig specifies advanced scaling options.
"""
pulumi.set(__self__, "triggers", triggers)
if advanced is not None:
pulumi.set(__self__, "advanced", advanced)
if cooldown_period is not None:
pulumi.set(__self__, "cooldown_period", cooldown_period)
if max_replica_count is not None:
pulumi.set(__self__, "max_replica_count", max_replica_count)
if min_replica_count is not None:
pulumi.set(__self__, "min_replica_count", min_replica_count)
if polling_interval is not None:
pulumi.set(__self__, "polling_interval", polling_interval)
@property
@pulumi.getter
def triggers(self) -> pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecTriggersArgs']]]:
return pulumi.get(self, "triggers")
@triggers.setter
def triggers(self, value: pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecTriggersArgs']]]):
pulumi.set(self, "triggers", value)
@property
@pulumi.getter
def advanced(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedArgs']]:
"""
AdvancedConfig specifies advanced scaling options.
"""
return pulumi.get(self, "advanced")
@advanced.setter
def advanced(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedArgs']]):
pulumi.set(self, "advanced", value)
@property
@pulumi.getter(name="cooldownPeriod")
def cooldown_period(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "cooldown_period")
@cooldown_period.setter
def cooldown_period(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "cooldown_period", value)
@property
@pulumi.getter(name="maxReplicaCount")
def max_replica_count(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "max_replica_count")
@max_replica_count.setter
def max_replica_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_replica_count", value)
@property
@pulumi.getter(name="minReplicaCount")
def min_replica_count(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "min_replica_count")
@min_replica_count.setter
def min_replica_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "min_replica_count", value)
@property
@pulumi.getter(name="pollingInterval")
def polling_interval(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "polling_interval")
@polling_interval.setter
def polling_interval(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "polling_interval", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedArgs:
def __init__(__self__, *,
horizontal_pod_autoscaler_config: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigArgs']] = None,
restore_to_original_replica_count: Optional[pulumi.Input[bool]] = None):
"""
AdvancedConfig specifies advanced scaling options.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigArgs'] horizontal_pod_autoscaler_config: HorizontalPodAutoscalerConfig specifies the horizontal scaling configuration.
"""
if horizontal_pod_autoscaler_config is not None:
pulumi.set(__self__, "horizontal_pod_autoscaler_config", horizontal_pod_autoscaler_config)
if restore_to_original_replica_count is not None:
pulumi.set(__self__, "restore_to_original_replica_count", restore_to_original_replica_count)
@property
@pulumi.getter(name="horizontalPodAutoscalerConfig")
def horizontal_pod_autoscaler_config(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigArgs']]:
"""
HorizontalPodAutoscalerConfig specifies the horizontal scaling configuration.
"""
return pulumi.get(self, "horizontal_pod_autoscaler_config")
@horizontal_pod_autoscaler_config.setter
def horizontal_pod_autoscaler_config(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigArgs']]):
pulumi.set(self, "horizontal_pod_autoscaler_config", value)
@property
@pulumi.getter(name="restoreToOriginalReplicaCount")
def restore_to_original_replica_count(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "restore_to_original_replica_count")
@restore_to_original_replica_count.setter
def restore_to_original_replica_count(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "restore_to_original_replica_count", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigArgs:
def __init__(__self__, *,
behavior: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehaviorArgs']] = None,
resource_metrics: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigResourceMetricsArgs']]]] = None):
"""
HorizontalPodAutoscalerConfig specifies the horizontal scaling configuration.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehaviorArgs'] behavior: HorizontalPodAutoscalerBehavior configures the scaling behavior of the target in both Up and Down directions (scaleUp and scaleDown fields respectively).
"""
if behavior is not None:
pulumi.set(__self__, "behavior", behavior)
if resource_metrics is not None:
pulumi.set(__self__, "resource_metrics", resource_metrics)
@property
@pulumi.getter
def behavior(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehaviorArgs']]:
"""
HorizontalPodAutoscalerBehavior configures the scaling behavior of the target in both Up and Down directions (scaleUp and scaleDown fields respectively).
"""
return pulumi.get(self, "behavior")
@behavior.setter
def behavior(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehaviorArgs']]):
pulumi.set(self, "behavior", value)
@property
@pulumi.getter(name="resourceMetrics")
def resource_metrics(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigResourceMetricsArgs']]]]:
return pulumi.get(self, "resource_metrics")
@resource_metrics.setter
def resource_metrics(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigResourceMetricsArgs']]]]):
pulumi.set(self, "resource_metrics", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehaviorArgs:
def __init__(__self__, *,
scale_down: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehaviorScaleDownArgs']] = None,
scale_up: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehaviorScaleUpArgs']] = None):
"""
HorizontalPodAutoscalerBehavior configures the scaling behavior of the target in both Up and Down directions (scaleUp and scaleDown fields respectively).
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehaviorScaleDownArgs'] scale_down: scaleDown is the scaling policy for scaling down. If not set, the default is to allow scaling down to minReplicas pods, with a 300-second stabilization window (i.e., the highest recommendation over the last 300 seconds is used).
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehaviorScaleUpArgs'] scale_up: scaleUp is the scaling policy for scaling up. If not set, the default is the higher of: increase no more than 4 pods per 60 seconds, or double the number of pods per 60 seconds. No stabilization is used.
"""
if scale_down is not None:
pulumi.set(__self__, "scale_down", scale_down)
if scale_up is not None:
pulumi.set(__self__, "scale_up", scale_up)
@property
@pulumi.getter(name="scaleDown")
def scale_down(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehaviorScaleDownArgs']]:
"""
scaleDown is the scaling policy for scaling down. If not set, the default is to allow scaling down to minReplicas pods, with a 300-second stabilization window (i.e., the highest recommendation over the last 300 seconds is used).
"""
return pulumi.get(self, "scale_down")
@scale_down.setter
def scale_down(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehaviorScaleDownArgs']]):
pulumi.set(self, "scale_down", value)
@property
@pulumi.getter(name="scaleUp")
def scale_up(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehaviorScaleUpArgs']]:
"""
scaleUp is the scaling policy for scaling up. If not set, the default is the higher of: increase no more than 4 pods per 60 seconds, or double the number of pods per 60 seconds. No stabilization is used.
"""
return pulumi.get(self, "scale_up")
@scale_up.setter
def scale_up(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehaviorScaleUpArgs']]):
pulumi.set(self, "scale_up", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehaviorScaleDownArgs:
def __init__(__self__, *,
policies: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehaviorScaleDownPoliciesArgs']]]] = None,
select_policy: Optional[pulumi.Input[str]] = None,
stabilization_window_seconds: Optional[pulumi.Input[int]] = None):
"""
scaleDown is the scaling policy for scaling down. If not set, the default is to allow scaling down to minReplicas pods, with a 300-second stabilization window (i.e., the highest recommendation over the last 300 seconds is used).
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehaviorScaleDownPoliciesArgs']]] policies: policies is a list of potential scaling policies which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid.
:param pulumi.Input[str] select_policy: selectPolicy is used to specify which policy should be used. If not set, the default value MaxPolicySelect is used.
:param pulumi.Input[int] stabilization_window_seconds: StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. It must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, the default values are used: 0 for scale up (no stabilization) and 300 for scale down (a 300-second stabilization window).
"""
if policies is not None:
pulumi.set(__self__, "policies", policies)
if select_policy is not None:
pulumi.set(__self__, "select_policy", select_policy)
if stabilization_window_seconds is not None:
pulumi.set(__self__, "stabilization_window_seconds", stabilization_window_seconds)
@property
@pulumi.getter
def policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehaviorScaleDownPoliciesArgs']]]]:
"""
policies is a list of potential scaling policies which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid.
"""
return pulumi.get(self, "policies")
@policies.setter
def policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehaviorScaleDownPoliciesArgs']]]]):
pulumi.set(self, "policies", value)
@property
@pulumi.getter(name="selectPolicy")
def select_policy(self) -> Optional[pulumi.Input[str]]:
"""
selectPolicy is used to specify which policy should be used. If not set, the default value MaxPolicySelect is used.
"""
return pulumi.get(self, "select_policy")
@select_policy.setter
def select_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "select_policy", value)
@property
@pulumi.getter(name="stabilizationWindowSeconds")
def stabilization_window_seconds(self) -> Optional[pulumi.Input[int]]:
"""
StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. It must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, the default values are used: 0 for scale up (no stabilization) and 300 for scale down (a 300-second stabilization window).
"""
return pulumi.get(self, "stabilization_window_seconds")
@stabilization_window_seconds.setter
def stabilization_window_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "stabilization_window_seconds", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehaviorScaleDownPoliciesArgs:
def __init__(__self__, *,
period_seconds: pulumi.Input[int],
type: pulumi.Input[str],
value: pulumi.Input[int]):
"""
HPAScalingPolicy is a single policy which must hold true for a specified past interval.
:param pulumi.Input[int] period_seconds: PeriodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).
:param pulumi.Input[str] type: Type is used to specify the scaling policy.
:param pulumi.Input[int] value: Value contains the amount of change which is permitted by the policy. It must be greater than zero
"""
pulumi.set(__self__, "period_seconds", period_seconds)
pulumi.set(__self__, "type", type)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter(name="periodSeconds")
def period_seconds(self) -> pulumi.Input[int]:
"""
PeriodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).
"""
return pulumi.get(self, "period_seconds")
@period_seconds.setter
def period_seconds(self, value: pulumi.Input[int]):
pulumi.set(self, "period_seconds", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
Type is used to specify the scaling policy.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[int]:
"""
Value contains the amount of change which is permitted by the policy. It must be greater than zero
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[int]):
pulumi.set(self, "value", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehaviorScaleUpArgs:
def __init__(__self__, *,
policies: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehaviorScaleUpPoliciesArgs']]]] = None,
select_policy: Optional[pulumi.Input[str]] = None,
stabilization_window_seconds: Optional[pulumi.Input[int]] = None):
"""
scaleUp is the scaling policy for scaling up. If not set, the default value is the higher of: increase no more than 4 pods per 60 seconds, or double the number of pods per 60 seconds. No stabilization is used.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehaviorScaleUpPoliciesArgs']]] policies: policies is a list of potential scaling policies which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid
:param pulumi.Input[str] select_policy: selectPolicy is used to specify which policy should be used. If not set, the default value MaxPolicySelect is used.
:param pulumi.Input[int] stabilization_window_seconds: StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long).
"""
if policies is not None:
pulumi.set(__self__, "policies", policies)
if select_policy is not None:
pulumi.set(__self__, "select_policy", select_policy)
if stabilization_window_seconds is not None:
pulumi.set(__self__, "stabilization_window_seconds", stabilization_window_seconds)
@property
@pulumi.getter
def policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehaviorScaleUpPoliciesArgs']]]]:
"""
policies is a list of potential scaling policies which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid
"""
return pulumi.get(self, "policies")
@policies.setter
def policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehaviorScaleUpPoliciesArgs']]]]):
pulumi.set(self, "policies", value)
@property
@pulumi.getter(name="selectPolicy")
def select_policy(self) -> Optional[pulumi.Input[str]]:
"""
selectPolicy is used to specify which policy should be used. If not set, the default value MaxPolicySelect is used.
"""
return pulumi.get(self, "select_policy")
@select_policy.setter
def select_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "select_policy", value)
@property
@pulumi.getter(name="stabilizationWindowSeconds")
def stabilization_window_seconds(self) -> Optional[pulumi.Input[int]]:
"""
StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long).
"""
return pulumi.get(self, "stabilization_window_seconds")
@stabilization_window_seconds.setter
def stabilization_window_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "stabilization_window_seconds", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigBehaviorScaleUpPoliciesArgs:
def __init__(__self__, *,
period_seconds: pulumi.Input[int],
type: pulumi.Input[str],
value: pulumi.Input[int]):
"""
HPAScalingPolicy is a single policy which must hold true for a specified past interval.
:param pulumi.Input[int] period_seconds: PeriodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).
:param pulumi.Input[str] type: Type is used to specify the scaling policy.
:param pulumi.Input[int] value: Value contains the amount of change which is permitted by the policy. It must be greater than zero
"""
pulumi.set(__self__, "period_seconds", period_seconds)
pulumi.set(__self__, "type", type)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter(name="periodSeconds")
def period_seconds(self) -> pulumi.Input[int]:
"""
PeriodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).
"""
return pulumi.get(self, "period_seconds")
@period_seconds.setter
def period_seconds(self, value: pulumi.Input[int]):
pulumi.set(self, "period_seconds", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
Type is used to specify the scaling policy.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[int]:
"""
Value contains the amount of change which is permitted by the policy. It must be greater than zero
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[int]):
pulumi.set(self, "value", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigResourceMetricsArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
target: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigResourceMetricsTargetArgs']):
"""
ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the "pods" source. Only one "target" type should be set.
:param pulumi.Input[str] name: name is the name of the resource in question.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigResourceMetricsTargetArgs'] target: target specifies the target value for the given metric
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "target", target)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
name is the name of the resource in question.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def target(self) -> pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigResourceMetricsTargetArgs']:
"""
target specifies the target value for the given metric
"""
return pulumi.get(self, "target")
@target.setter
def target(self, value: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigResourceMetricsTargetArgs']):
pulumi.set(self, "target", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigResourceMetricsTargetArgs:
def __init__(__self__, *,
type: pulumi.Input[str],
average_utilization: Optional[pulumi.Input[int]] = None,
average_value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigResourceMetricsTargetAverageValueArgs']] = None,
value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigResourceMetricsTargetValueArgs']] = None):
"""
target specifies the target value for the given metric
:param pulumi.Input[str] type: type represents whether the metric type is Utilization, Value, or AverageValue
:param pulumi.Input[int] average_utilization: averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigResourceMetricsTargetAverageValueArgs'] average_value: averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigResourceMetricsTargetValueArgs'] value: value is the target value of the metric (as a quantity).
"""
pulumi.set(__self__, "type", type)
if average_utilization is not None:
pulumi.set(__self__, "average_utilization", average_utilization)
if average_value is not None:
pulumi.set(__self__, "average_value", average_value)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
type represents whether the metric type is Utilization, Value, or AverageValue
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="averageUtilization")
def average_utilization(self) -> Optional[pulumi.Input[int]]:
"""
averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type
"""
return pulumi.get(self, "average_utilization")
@average_utilization.setter
def average_utilization(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "average_utilization", value)
@property
@pulumi.getter(name="averageValue")
def average_value(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigResourceMetricsTargetAverageValueArgs']]:
"""
averageValue is the target value of the average of the metric across all relevant pods (as a quantity)
"""
return pulumi.get(self, "average_value")
@average_value.setter
def average_value(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigResourceMetricsTargetAverageValueArgs']]):
pulumi.set(self, "average_value", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigResourceMetricsTargetValueArgs']]:
"""
value is the target value of the metric (as a quantity).
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigResourceMetricsTargetValueArgs']]):
pulumi.set(self, "value", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigResourceMetricsTargetAverageValueArgs:
    def __init__(__self__):
        # No properties are generated here: this field appears in the CRD as
        # an untyped Kubernetes Quantity, so the schema carries no fields.
        pass
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecAdvancedHorizontalPodAutoscalerConfigResourceMetricsTargetValueArgs:
    def __init__(__self__):
        # No properties are generated here: this field appears in the CRD as
        # an untyped Kubernetes Quantity, so the schema carries no fields.
        pass
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecTriggersArgs:
def __init__(__self__, *,
metadata: pulumi.Input[Mapping[str, pulumi.Input[str]]],
type: pulumi.Input[str],
authentication_ref: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecTriggersAuthenticationRefArgs']] = None,
name: Optional[pulumi.Input[str]] = None):
"""
ScaleTriggers reference the scaler that will be used
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecTriggersAuthenticationRefArgs'] authentication_ref: ScaledObjectAuthRef points to the TriggerAuthentication object that is used to authenticate the scaler with the environment
"""
pulumi.set(__self__, "metadata", metadata)
pulumi.set(__self__, "type", type)
if authentication_ref is not None:
pulumi.set(__self__, "authentication_ref", authentication_ref)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def metadata(self) -> pulumi.Input[Mapping[str, pulumi.Input[str]]]:
return pulumi.get(self, "metadata")
@metadata.setter
def metadata(self, value: pulumi.Input[Mapping[str, pulumi.Input[str]]]):
pulumi.set(self, "metadata", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="authenticationRef")
def authentication_ref(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecTriggersAuthenticationRefArgs']]:
"""
ScaledObjectAuthRef points to the TriggerAuthentication object that is used to authenticate the scaler with the environment
"""
return pulumi.get(self, "authentication_ref")
@authentication_ref.setter
def authentication_ref(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecTriggersAuthenticationRefArgs']]):
pulumi.set(self, "authentication_ref", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsKedaSpecTriggersAuthenticationRefArgs:
def __init__(__self__, *,
name: pulumi.Input[str]):
"""
ScaledObjectAuthRef points to the TriggerAuthentication object that is used to authenticate the scaler with the environment
"""
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsPdbSpecArgs:
def __init__(__self__, *,
max_unavailable: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsPdbSpecMaxUnavailableArgs']] = None,
min_available: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsPdbSpecMinAvailableArgs']] = None):
"""
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsPdbSpecMaxUnavailableArgs'] max_unavailable: An eviction is allowed if at most "maxUnavailable" pods in the deployment corresponding to a componentSpec are unavailable after the eviction, i.e. even in the absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. MaxUnavailable and MinAvailable are mutually exclusive.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsPdbSpecMinAvailableArgs'] min_available: An eviction is allowed if at least "minAvailable" pods in the deployment corresponding to a componentSpec will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying "100%".
"""
if max_unavailable is not None:
pulumi.set(__self__, "max_unavailable", max_unavailable)
if min_available is not None:
pulumi.set(__self__, "min_available", min_available)
@property
@pulumi.getter(name="maxUnavailable")
def max_unavailable(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsPdbSpecMaxUnavailableArgs']]:
"""
An eviction is allowed if at most "maxUnavailable" pods in the deployment corresponding to a componentSpec are unavailable after the eviction, i.e. even in the absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. MaxUnavailable and MinAvailable are mutually exclusive.
"""
return pulumi.get(self, "max_unavailable")
@max_unavailable.setter
def max_unavailable(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsPdbSpecMaxUnavailableArgs']]):
pulumi.set(self, "max_unavailable", value)
@property
@pulumi.getter(name="minAvailable")
def min_available(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsPdbSpecMinAvailableArgs']]:
"""
An eviction is allowed if at least "minAvailable" pods in the deployment corresponding to a componentSpec will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying "100%".
"""
return pulumi.get(self, "min_available")
@min_available.setter
def min_available(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsPdbSpecMinAvailableArgs']]):
pulumi.set(self, "min_available", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsPdbSpecMaxUnavailableArgs:
    def __init__(__self__):
        # No properties are generated here: maxUnavailable appears in the CRD
        # as an untyped int-or-string value (e.g. 1 or "25%").
        pass
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsPdbSpecMinAvailableArgs:
    def __init__(__self__):
        # No properties are generated here: minAvailable appears in the CRD
        # as an untyped int-or-string value (e.g. 1 or "100%").
        pass
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecArgs:
def __init__(__self__, *,
containers: pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersArgs']]],
active_deadline_seconds: Optional[pulumi.Input[int]] = None,
affinity: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityArgs']] = None,
automount_service_account_token: Optional[pulumi.Input[bool]] = None,
dns_config: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecDnsConfigArgs']] = None,
dns_policy: Optional[pulumi.Input[str]] = None,
enable_service_links: Optional[pulumi.Input[bool]] = None,
ephemeral_containers: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersArgs']]]] = None,
host_aliases: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecHostAliasesArgs']]]] = None,
host_ipc: Optional[pulumi.Input[bool]] = None,
host_network: Optional[pulumi.Input[bool]] = None,
host_pid: Optional[pulumi.Input[bool]] = None,
hostname: Optional[pulumi.Input[str]] = None,
image_pull_secrets: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecImagePullSecretsArgs']]]] = None,
init_containers: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersArgs']]]] = None,
node_name: Optional[pulumi.Input[str]] = None,
node_selector: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
overhead: Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecOverheadArgs']]]] = None,
preemption_policy: Optional[pulumi.Input[str]] = None,
priority: Optional[pulumi.Input[int]] = None,
priority_class_name: Optional[pulumi.Input[str]] = None,
readiness_gates: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecReadinessGatesArgs']]]] = None,
restart_policy: Optional[pulumi.Input[str]] = None,
runtime_class_name: Optional[pulumi.Input[str]] = None,
scheduler_name: Optional[pulumi.Input[str]] = None,
security_context: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecSecurityContextArgs']] = None,
service_account: Optional[pulumi.Input[str]] = None,
service_account_name: Optional[pulumi.Input[str]] = None,
share_process_namespace: Optional[pulumi.Input[bool]] = None,
subdomain: Optional[pulumi.Input[str]] = None,
termination_grace_period_seconds: Optional[pulumi.Input[int]] = None,
tolerations: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecTolerationsArgs']]]] = None,
topology_spread_constraints: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecTopologySpreadConstraintsArgs']]]] = None,
volumes: Optional[pulumi.Input[Sequence[pulumi.Input[Mapping[str, Any]]]]] = None):
"""
PodSpec is a description of a pod.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersArgs']]] containers: List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.
:param pulumi.Input[int] active_deadline_seconds: Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityArgs'] affinity: If specified, the pod's scheduling constraints
:param pulumi.Input[bool] automount_service_account_token: AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecDnsConfigArgs'] dns_config: Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.
:param pulumi.Input[str] dns_policy: Set DNS policy for the pod. Defaults to "ClusterFirst". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.
:param pulumi.Input[bool] enable_service_links: EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersArgs']]] ephemeral_containers: List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecHostAliasesArgs']]] host_aliases: HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.
:param pulumi.Input[bool] host_ipc: Use the host's ipc namespace. Optional: Default to false.
:param pulumi.Input[bool] host_network: Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.
:param pulumi.Input[bool] host_pid: Use the host's pid namespace. Optional: Default to false.
:param pulumi.Input[str] hostname: Specifies the hostname of the Pod. If not specified, the pod's hostname will be set to a system-defined value.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecImagePullSecretsArgs']]] image_pull_secrets: ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersArgs']]] init_containers: List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
:param pulumi.Input[str] node_name: NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] node_selector: NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
:param pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecOverheadArgs']]] overhead: Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature.
:param pulumi.Input[str] preemption_policy: PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.
:param pulumi.Input[int] priority: The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.
:param pulumi.Input[str] priority_class_name: If specified, indicates the pod's priority. "system-node-critical" and "system-cluster-critical" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecReadinessGatesArgs']]] readiness_gates: If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to "True" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md
:param pulumi.Input[str] restart_policy: Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy
:param pulumi.Input[str] runtime_class_name: RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14.
:param pulumi.Input[str] scheduler_name: If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecSecurityContextArgs'] security_context: SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.
:param pulumi.Input[str] service_account: DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.
:param pulumi.Input[str] service_account_name: ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
:param pulumi.Input[bool] share_process_namespace: Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false.
:param pulumi.Input[str] subdomain: If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>". If not specified, the pod will not have a domain name at all.
:param pulumi.Input[int] termination_grace_period_seconds: Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecTolerationsArgs']]] tolerations: If specified, the pod's tolerations.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecTopologySpreadConstraintsArgs']]] topology_spread_constraints: TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. This field is only honored by clusters that enable the EvenPodsSpread feature. All topologySpreadConstraints are ANDed.
:param pulumi.Input[Sequence[pulumi.Input[Mapping[str, Any]]]] volumes: List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes
"""
pulumi.set(__self__, "containers", containers)
if active_deadline_seconds is not None:
pulumi.set(__self__, "active_deadline_seconds", active_deadline_seconds)
if affinity is not None:
pulumi.set(__self__, "affinity", affinity)
if automount_service_account_token is not None:
pulumi.set(__self__, "automount_service_account_token", automount_service_account_token)
if dns_config is not None:
pulumi.set(__self__, "dns_config", dns_config)
if dns_policy is not None:
pulumi.set(__self__, "dns_policy", dns_policy)
if enable_service_links is not None:
pulumi.set(__self__, "enable_service_links", enable_service_links)
if ephemeral_containers is not None:
pulumi.set(__self__, "ephemeral_containers", ephemeral_containers)
if host_aliases is not None:
pulumi.set(__self__, "host_aliases", host_aliases)
if host_ipc is not None:
pulumi.set(__self__, "host_ipc", host_ipc)
if host_network is not None:
pulumi.set(__self__, "host_network", host_network)
if host_pid is not None:
pulumi.set(__self__, "host_pid", host_pid)
if hostname is not None:
pulumi.set(__self__, "hostname", hostname)
if image_pull_secrets is not None:
pulumi.set(__self__, "image_pull_secrets", image_pull_secrets)
if init_containers is not None:
pulumi.set(__self__, "init_containers", init_containers)
if node_name is not None:
pulumi.set(__self__, "node_name", node_name)
if node_selector is not None:
pulumi.set(__self__, "node_selector", node_selector)
if overhead is not None:
pulumi.set(__self__, "overhead", overhead)
if preemption_policy is not None:
pulumi.set(__self__, "preemption_policy", preemption_policy)
if priority is not None:
pulumi.set(__self__, "priority", priority)
if priority_class_name is not None:
pulumi.set(__self__, "priority_class_name", priority_class_name)
if readiness_gates is not None:
pulumi.set(__self__, "readiness_gates", readiness_gates)
if restart_policy is not None:
pulumi.set(__self__, "restart_policy", restart_policy)
if runtime_class_name is not None:
pulumi.set(__self__, "runtime_class_name", runtime_class_name)
if scheduler_name is not None:
pulumi.set(__self__, "scheduler_name", scheduler_name)
if security_context is not None:
pulumi.set(__self__, "security_context", security_context)
if service_account is not None:
pulumi.set(__self__, "service_account", service_account)
if service_account_name is not None:
pulumi.set(__self__, "service_account_name", service_account_name)
if share_process_namespace is not None:
pulumi.set(__self__, "share_process_namespace", share_process_namespace)
if subdomain is not None:
pulumi.set(__self__, "subdomain", subdomain)
if termination_grace_period_seconds is not None:
pulumi.set(__self__, "termination_grace_period_seconds", termination_grace_period_seconds)
if tolerations is not None:
pulumi.set(__self__, "tolerations", tolerations)
if topology_spread_constraints is not None:
pulumi.set(__self__, "topology_spread_constraints", topology_spread_constraints)
if volumes is not None:
pulumi.set(__self__, "volumes", volumes)
@property
@pulumi.getter
def containers(self) -> pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersArgs']]]:
"""
List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.
"""
return pulumi.get(self, "containers")
@containers.setter
def containers(self, value: pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersArgs']]]):
pulumi.set(self, "containers", value)
@property
@pulumi.getter(name="activeDeadlineSeconds")
def active_deadline_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.
"""
return pulumi.get(self, "active_deadline_seconds")
@active_deadline_seconds.setter
def active_deadline_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "active_deadline_seconds", value)
@property
@pulumi.getter
def affinity(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityArgs']]:
"""
If specified, the pod's scheduling constraints
"""
return pulumi.get(self, "affinity")
@affinity.setter
def affinity(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityArgs']]):
pulumi.set(self, "affinity", value)
@property
@pulumi.getter(name="automountServiceAccountToken")
def automount_service_account_token(self) -> Optional[pulumi.Input[bool]]:
"""
AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.
"""
return pulumi.get(self, "automount_service_account_token")
@automount_service_account_token.setter
def automount_service_account_token(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "automount_service_account_token", value)
@property
@pulumi.getter(name="dnsConfig")
def dns_config(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecDnsConfigArgs']]:
"""
Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.
"""
return pulumi.get(self, "dns_config")
@dns_config.setter
def dns_config(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecDnsConfigArgs']]):
pulumi.set(self, "dns_config", value)
@property
@pulumi.getter(name="dnsPolicy")
def dns_policy(self) -> Optional[pulumi.Input[str]]:
"""
Set DNS policy for the pod. Defaults to "ClusterFirst". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.
"""
return pulumi.get(self, "dns_policy")
@dns_policy.setter
def dns_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "dns_policy", value)
@property
@pulumi.getter(name="enableServiceLinks")
def enable_service_links(self) -> Optional[pulumi.Input[bool]]:
"""
EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.
"""
return pulumi.get(self, "enable_service_links")
@enable_service_links.setter
def enable_service_links(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_service_links", value)
@property
@pulumi.getter(name="ephemeralContainers")
def ephemeral_containers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersArgs']]]]:
"""
List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.
"""
return pulumi.get(self, "ephemeral_containers")
@ephemeral_containers.setter
def ephemeral_containers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersArgs']]]]):
pulumi.set(self, "ephemeral_containers", value)
@property
@pulumi.getter(name="hostAliases")
def host_aliases(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecHostAliasesArgs']]]]:
"""
HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.
"""
return pulumi.get(self, "host_aliases")
@host_aliases.setter
def host_aliases(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecHostAliasesArgs']]]]):
pulumi.set(self, "host_aliases", value)
@property
@pulumi.getter(name="hostIPC")
def host_ipc(self) -> Optional[pulumi.Input[bool]]:
"""
Use the host's ipc namespace. Optional: Default to false.
"""
return pulumi.get(self, "host_ipc")
@host_ipc.setter
def host_ipc(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "host_ipc", value)
@property
@pulumi.getter(name="hostNetwork")
def host_network(self) -> Optional[pulumi.Input[bool]]:
"""
Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.
"""
return pulumi.get(self, "host_network")
@host_network.setter
def host_network(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "host_network", value)
@property
@pulumi.getter(name="hostPID")
def host_pid(self) -> Optional[pulumi.Input[bool]]:
"""
Use the host's pid namespace. Optional: Default to false.
"""
return pulumi.get(self, "host_pid")
@host_pid.setter
def host_pid(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "host_pid", value)
@property
@pulumi.getter
def hostname(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the hostname of the Pod. If not specified, the pod's hostname will be set to a system-defined value.
"""
return pulumi.get(self, "hostname")
@hostname.setter
def hostname(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "hostname", value)
@property
@pulumi.getter(name="imagePullSecrets")
def image_pull_secrets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecImagePullSecretsArgs']]]]:
"""
ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
"""
return pulumi.get(self, "image_pull_secrets")
@image_pull_secrets.setter
def image_pull_secrets(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecImagePullSecretsArgs']]]]):
pulumi.set(self, "image_pull_secrets", value)
@property
@pulumi.getter(name="initContainers")
def init_containers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersArgs']]]]:
"""
List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
"""
return pulumi.get(self, "init_containers")
@init_containers.setter
def init_containers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersArgs']]]]):
pulumi.set(self, "init_containers", value)
@property
@pulumi.getter(name="nodeName")
def node_name(self) -> Optional[pulumi.Input[str]]:
"""
NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.
"""
return pulumi.get(self, "node_name")
@node_name.setter
def node_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "node_name", value)
@property
@pulumi.getter(name="nodeSelector")
def node_selector(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
"""
return pulumi.get(self, "node_selector")
@node_selector.setter
def node_selector(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "node_selector", value)
@property
@pulumi.getter
def overhead(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecOverheadArgs']]]]:
"""
Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature.
"""
return pulumi.get(self, "overhead")
@overhead.setter
def overhead(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecOverheadArgs']]]]):
pulumi.set(self, "overhead", value)
@property
@pulumi.getter(name="preemptionPolicy")
def preemption_policy(self) -> Optional[pulumi.Input[str]]:
"""
PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.
"""
return pulumi.get(self, "preemption_policy")
@preemption_policy.setter
def preemption_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "preemption_policy", value)
@property
@pulumi.getter
def priority(self) -> Optional[pulumi.Input[int]]:
"""
The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.
"""
return pulumi.get(self, "priority")
@priority.setter
def priority(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "priority", value)
@property
@pulumi.getter(name="priorityClassName")
def priority_class_name(self) -> Optional[pulumi.Input[str]]:
"""
If specified, indicates the pod's priority. "system-node-critical" and "system-cluster-critical" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.
"""
return pulumi.get(self, "priority_class_name")
@priority_class_name.setter
def priority_class_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "priority_class_name", value)
@property
@pulumi.getter(name="readinessGates")
def readiness_gates(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecReadinessGatesArgs']]]]:
"""
If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to "True" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md
"""
return pulumi.get(self, "readiness_gates")
@readiness_gates.setter
def readiness_gates(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecReadinessGatesArgs']]]]):
pulumi.set(self, "readiness_gates", value)
@property
@pulumi.getter(name="restartPolicy")
def restart_policy(self) -> Optional[pulumi.Input[str]]:
"""
Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy
"""
return pulumi.get(self, "restart_policy")
@restart_policy.setter
def restart_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "restart_policy", value)
@property
@pulumi.getter(name="runtimeClassName")
def runtime_class_name(self) -> Optional[pulumi.Input[str]]:
"""
RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14.
"""
return pulumi.get(self, "runtime_class_name")
@runtime_class_name.setter
def runtime_class_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "runtime_class_name", value)
@property
@pulumi.getter(name="schedulerName")
def scheduler_name(self) -> Optional[pulumi.Input[str]]:
"""
If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.
"""
return pulumi.get(self, "scheduler_name")
@scheduler_name.setter
def scheduler_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scheduler_name", value)
@property
@pulumi.getter(name="securityContext")
def security_context(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecSecurityContextArgs']]:
"""
SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.
"""
return pulumi.get(self, "security_context")
@security_context.setter
def security_context(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecSecurityContextArgs']]):
pulumi.set(self, "security_context", value)
@property
@pulumi.getter(name="serviceAccount")
def service_account(self) -> Optional[pulumi.Input[str]]:
"""
DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.
"""
return pulumi.get(self, "service_account")
@service_account.setter
def service_account(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_account", value)
@property
@pulumi.getter(name="serviceAccountName")
def service_account_name(self) -> Optional[pulumi.Input[str]]:
"""
ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
"""
return pulumi.get(self, "service_account_name")
@service_account_name.setter
def service_account_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_account_name", value)
@property
@pulumi.getter(name="shareProcessNamespace")
def share_process_namespace(self) -> Optional[pulumi.Input[bool]]:
"""
Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false.
"""
return pulumi.get(self, "share_process_namespace")
@share_process_namespace.setter
def share_process_namespace(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "share_process_namespace", value)
@property
@pulumi.getter
def subdomain(self) -> Optional[pulumi.Input[str]]:
"""
If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>". If not specified, the pod will not have a domain name at all.
"""
return pulumi.get(self, "subdomain")
@subdomain.setter
def subdomain(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "subdomain", value)
@property
@pulumi.getter(name="terminationGracePeriodSeconds")
def termination_grace_period_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.
"""
return pulumi.get(self, "termination_grace_period_seconds")
@termination_grace_period_seconds.setter
def termination_grace_period_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "termination_grace_period_seconds", value)
@property
@pulumi.getter
def tolerations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecTolerationsArgs']]]]:
"""
If specified, the pod's tolerations.
"""
return pulumi.get(self, "tolerations")
@tolerations.setter
def tolerations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecTolerationsArgs']]]]):
pulumi.set(self, "tolerations", value)
@property
@pulumi.getter(name="topologySpreadConstraints")
def topology_spread_constraints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecTopologySpreadConstraintsArgs']]]]:
"""
TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. This field is only honored by clusters that enable the EvenPodsSpread feature. All topologySpreadConstraints are ANDed.
"""
return pulumi.get(self, "topology_spread_constraints")
@topology_spread_constraints.setter
def topology_spread_constraints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecTopologySpreadConstraintsArgs']]]]):
pulumi.set(self, "topology_spread_constraints", value)
@property
@pulumi.getter
def volumes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[Mapping[str, Any]]]]]:
"""
List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes
"""
return pulumi.get(self, "volumes")
@volumes.setter
def volumes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[Mapping[str, Any]]]]]):
pulumi.set(self, "volumes", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityArgs:
def __init__(__self__, *,
node_affinity: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityArgs']] = None,
pod_affinity: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityArgs']] = None,
pod_anti_affinity: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityArgs']] = None):
"""
If specified, the pod's scheduling constraints.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityArgs'] node_affinity: Describes node affinity scheduling rules for the pod.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityArgs'] pod_affinity: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityArgs'] pod_anti_affinity: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
"""
if node_affinity is not None:
pulumi.set(__self__, "node_affinity", node_affinity)
if pod_affinity is not None:
pulumi.set(__self__, "pod_affinity", pod_affinity)
if pod_anti_affinity is not None:
pulumi.set(__self__, "pod_anti_affinity", pod_anti_affinity)
@property
@pulumi.getter(name="nodeAffinity")
def node_affinity(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityArgs']]:
"""
Describes node affinity scheduling rules for the pod.
"""
return pulumi.get(self, "node_affinity")
@node_affinity.setter
def node_affinity(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityArgs']]):
pulumi.set(self, "node_affinity", value)
@property
@pulumi.getter(name="podAffinity")
def pod_affinity(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityArgs']]:
"""
Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
"""
return pulumi.get(self, "pod_affinity")
@pod_affinity.setter
def pod_affinity(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityArgs']]):
pulumi.set(self, "pod_affinity", value)
@property
@pulumi.getter(name="podAntiAffinity")
def pod_anti_affinity(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityArgs']]:
"""
Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
"""
return pulumi.get(self, "pod_anti_affinity")
@pod_anti_affinity.setter
def pod_anti_affinity(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityArgs']]):
pulumi.set(self, "pod_anti_affinity", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityArgs:
def __init__(__self__, *,
preferred_during_scheduling_ignored_during_execution: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs']]]] = None,
required_during_scheduling_ignored_during_execution: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs']] = None):
"""
Describes node affinity scheduling rules for the pod.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs']]] preferred_during_scheduling_ignored_during_execution: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs'] required_during_scheduling_ignored_during_execution: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
"""
if preferred_during_scheduling_ignored_during_execution is not None:
pulumi.set(__self__, "preferred_during_scheduling_ignored_during_execution", preferred_during_scheduling_ignored_during_execution)
if required_during_scheduling_ignored_during_execution is not None:
pulumi.set(__self__, "required_during_scheduling_ignored_during_execution", required_during_scheduling_ignored_during_execution)
@property
@pulumi.getter(name="preferredDuringSchedulingIgnoredDuringExecution")
def preferred_during_scheduling_ignored_during_execution(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs']]]]:
"""
The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
"""
return pulumi.get(self, "preferred_during_scheduling_ignored_during_execution")
@preferred_during_scheduling_ignored_during_execution.setter
def preferred_during_scheduling_ignored_during_execution(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs']]]]):
pulumi.set(self, "preferred_during_scheduling_ignored_during_execution", value)
@property
@pulumi.getter(name="requiredDuringSchedulingIgnoredDuringExecution")
def required_during_scheduling_ignored_during_execution(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs']]:
"""
If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
"""
return pulumi.get(self, "required_during_scheduling_ignored_during_execution")
@required_during_scheduling_ignored_during_execution.setter
def required_during_scheduling_ignored_during_execution(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs']]):
pulumi.set(self, "required_during_scheduling_ignored_during_execution", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs:
def __init__(__self__, *,
preference: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceArgs'],
weight: pulumi.Input[int]):
"""
An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceArgs'] preference: A node selector term, associated with the corresponding weight.
:param pulumi.Input[int] weight: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
"""
pulumi.set(__self__, "preference", preference)
pulumi.set(__self__, "weight", weight)
@property
@pulumi.getter
def preference(self) -> pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceArgs']:
"""
A node selector term, associated with the corresponding weight.
"""
return pulumi.get(self, "preference")
@preference.setter
def preference(self, value: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceArgs']):
pulumi.set(self, "preference", value)
@property
@pulumi.getter
def weight(self) -> pulumi.Input[int]:
"""
Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
"""
return pulumi.get(self, "weight")
@weight.setter
def weight(self, value: pulumi.Input[int]):
pulumi.set(self, "weight", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceArgs:
def __init__(__self__, *,
match_expressions: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressionsArgs']]]] = None,
match_fields: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFieldsArgs']]]] = None):
"""
A node selector term, associated with the corresponding weight.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressionsArgs']]] match_expressions: A list of node selector requirements by node's labels.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFieldsArgs']]] match_fields: A list of node selector requirements by node's fields.
"""
if match_expressions is not None:
pulumi.set(__self__, "match_expressions", match_expressions)
if match_fields is not None:
pulumi.set(__self__, "match_fields", match_fields)
@property
@pulumi.getter(name="matchExpressions")
def match_expressions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressionsArgs']]]]:
"""
A list of node selector requirements by node's labels.
"""
return pulumi.get(self, "match_expressions")
@match_expressions.setter
def match_expressions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressionsArgs']]]]):
pulumi.set(self, "match_expressions", value)
@property
@pulumi.getter(name="matchFields")
def match_fields(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFieldsArgs']]]]:
"""
A list of node selector requirements by node's fields.
"""
return pulumi.get(self, "match_fields")
@match_fields.setter
def match_fields(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFieldsArgs']]]]):
pulumi.set(self, "match_fields", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressionsArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
operator: pulumi.Input[str],
values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
:param pulumi.Input[str] key: The label key that the selector applies to.
:param pulumi.Input[str] operator: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
:param pulumi.Input[Sequence[pulumi.Input[str]]] values: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "operator", operator)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The label key that the selector applies to.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def operator(self) -> pulumi.Input[str]:
"""
Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: pulumi.Input[str]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
"""
return pulumi.get(self, "values")
@values.setter
def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "values", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFieldsArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
operator: pulumi.Input[str],
values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
:param pulumi.Input[str] key: The label key that the selector applies to.
:param pulumi.Input[str] operator: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
:param pulumi.Input[Sequence[pulumi.Input[str]]] values: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "operator", operator)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The label key that the selector applies to.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def operator(self) -> pulumi.Input[str]:
"""
Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: pulumi.Input[str]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
"""
return pulumi.get(self, "values")
@values.setter
def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "values", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs:
def __init__(__self__, *,
node_selector_terms: pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsArgs']]]):
"""
If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsArgs']]] node_selector_terms: Required. A list of node selector terms. The terms are ORed.
"""
pulumi.set(__self__, "node_selector_terms", node_selector_terms)
@property
@pulumi.getter(name="nodeSelectorTerms")
def node_selector_terms(self) -> pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsArgs']]]:
"""
Required. A list of node selector terms. The terms are ORed.
"""
return pulumi.get(self, "node_selector_terms")
@node_selector_terms.setter
def node_selector_terms(self, value: pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsArgs']]]):
pulumi.set(self, "node_selector_terms", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsArgs:
def __init__(__self__, *,
match_expressions: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressionsArgs']]]] = None,
match_fields: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFieldsArgs']]]] = None):
"""
A null or empty node selector term matches no objects. Its requirements are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressionsArgs']]] match_expressions: A list of node selector requirements by node's labels.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFieldsArgs']]] match_fields: A list of node selector requirements by node's fields.
"""
if match_expressions is not None:
pulumi.set(__self__, "match_expressions", match_expressions)
if match_fields is not None:
pulumi.set(__self__, "match_fields", match_fields)
@property
@pulumi.getter(name="matchExpressions")
def match_expressions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressionsArgs']]]]:
"""
A list of node selector requirements by node's labels.
"""
return pulumi.get(self, "match_expressions")
@match_expressions.setter
def match_expressions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressionsArgs']]]]):
pulumi.set(self, "match_expressions", value)
@property
@pulumi.getter(name="matchFields")
def match_fields(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFieldsArgs']]]]:
"""
A list of node selector requirements by node's fields.
"""
return pulumi.get(self, "match_fields")
@match_fields.setter
def match_fields(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFieldsArgs']]]]):
pulumi.set(self, "match_fields", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressionsArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
operator: pulumi.Input[str],
values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
:param pulumi.Input[str] key: The label key that the selector applies to.
:param pulumi.Input[str] operator: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
:param pulumi.Input[Sequence[pulumi.Input[str]]] values: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "operator", operator)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The label key that the selector applies to.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def operator(self) -> pulumi.Input[str]:
"""
Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: pulumi.Input[str]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
"""
return pulumi.get(self, "values")
@values.setter
def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "values", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFieldsArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
operator: pulumi.Input[str],
values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
:param pulumi.Input[str] key: The label key that the selector applies to.
:param pulumi.Input[str] operator: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
:param pulumi.Input[Sequence[pulumi.Input[str]]] values: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "operator", operator)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The label key that the selector applies to.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def operator(self) -> pulumi.Input[str]:
"""
Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: pulumi.Input[str]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
"""
return pulumi.get(self, "values")
@values.setter
def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "values", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityArgs:
def __init__(__self__, *,
preferred_during_scheduling_ignored_during_execution: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs']]]] = None,
required_during_scheduling_ignored_during_execution: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs']]]] = None):
"""
Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs']]] preferred_during_scheduling_ignored_during_execution: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs']]] required_during_scheduling_ignored_during_execution: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
"""
if preferred_during_scheduling_ignored_during_execution is not None:
pulumi.set(__self__, "preferred_during_scheduling_ignored_during_execution", preferred_during_scheduling_ignored_during_execution)
if required_during_scheduling_ignored_during_execution is not None:
pulumi.set(__self__, "required_during_scheduling_ignored_during_execution", required_during_scheduling_ignored_during_execution)
@property
@pulumi.getter(name="preferredDuringSchedulingIgnoredDuringExecution")
def preferred_during_scheduling_ignored_during_execution(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs']]]]:
"""
The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
"""
return pulumi.get(self, "preferred_during_scheduling_ignored_during_execution")
@preferred_during_scheduling_ignored_during_execution.setter
def preferred_during_scheduling_ignored_during_execution(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs']]]]):
pulumi.set(self, "preferred_during_scheduling_ignored_during_execution", value)
@property
@pulumi.getter(name="requiredDuringSchedulingIgnoredDuringExecution")
def required_during_scheduling_ignored_during_execution(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs']]]]:
"""
If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
"""
return pulumi.get(self, "required_during_scheduling_ignored_during_execution")
@required_during_scheduling_ignored_during_execution.setter
def required_during_scheduling_ignored_during_execution(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs']]]]):
pulumi.set(self, "required_during_scheduling_ignored_during_execution", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs:
def __init__(__self__, *,
pod_affinity_term: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermArgs'],
weight: pulumi.Input[int]):
"""
The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s).
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermArgs'] pod_affinity_term: Required. A pod affinity term, associated with the corresponding weight.
:param pulumi.Input[int] weight: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
"""
pulumi.set(__self__, "pod_affinity_term", pod_affinity_term)
pulumi.set(__self__, "weight", weight)
@property
@pulumi.getter(name="podAffinityTerm")
def pod_affinity_term(self) -> pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermArgs']:
"""
Required. A pod affinity term, associated with the corresponding weight.
"""
return pulumi.get(self, "pod_affinity_term")
@pod_affinity_term.setter
def pod_affinity_term(self, value: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermArgs']):
pulumi.set(self, "pod_affinity_term", value)
@property
@pulumi.getter
def weight(self) -> pulumi.Input[int]:
"""
weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
"""
return pulumi.get(self, "weight")
@weight.setter
def weight(self, value: pulumi.Input[int]):
pulumi.set(self, "weight", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermArgs:
def __init__(__self__, *,
topology_key: pulumi.Input[str],
label_selector: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorArgs']] = None,
namespaces: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Required. A pod affinity term, associated with the corresponding weight.
:param pulumi.Input[str] topology_key: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorArgs'] label_selector: A label query over a set of resources, in this case pods.
:param pulumi.Input[Sequence[pulumi.Input[str]]] namespaces: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace".
"""
pulumi.set(__self__, "topology_key", topology_key)
if label_selector is not None:
pulumi.set(__self__, "label_selector", label_selector)
if namespaces is not None:
pulumi.set(__self__, "namespaces", namespaces)
@property
@pulumi.getter(name="topologyKey")
def topology_key(self) -> pulumi.Input[str]:
"""
This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
"""
return pulumi.get(self, "topology_key")
@topology_key.setter
def topology_key(self, value: pulumi.Input[str]):
pulumi.set(self, "topology_key", value)
@property
@pulumi.getter(name="labelSelector")
def label_selector(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorArgs']]:
"""
A label query over a set of resources, in this case pods.
"""
return pulumi.get(self, "label_selector")
@label_selector.setter
def label_selector(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorArgs']]):
pulumi.set(self, "label_selector", value)
@property
@pulumi.getter
def namespaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace".
"""
return pulumi.get(self, "namespaces")
@namespaces.setter
def namespaces(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "namespaces", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorArgs:
def __init__(__self__, *,
match_expressions: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressionsArgs']]]] = None,
match_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
A label query over a set of resources, in this case pods.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressionsArgs']]] match_expressions: matchExpressions is a list of label selector requirements. The requirements are ANDed.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
if match_expressions is not None:
pulumi.set(__self__, "match_expressions", match_expressions)
if match_labels is not None:
pulumi.set(__self__, "match_labels", match_labels)
@property
@pulumi.getter(name="matchExpressions")
def match_expressions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressionsArgs']]]]:
"""
matchExpressions is a list of label selector requirements. The requirements are ANDed.
"""
return pulumi.get(self, "match_expressions")
@match_expressions.setter
def match_expressions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressionsArgs']]]]):
pulumi.set(self, "match_expressions", value)
@property
@pulumi.getter(name="matchLabels")
def match_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
return pulumi.get(self, "match_labels")
@match_labels.setter
def match_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "match_labels", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressionsArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
operator: pulumi.Input[str],
values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
:param pulumi.Input[str] key: key is the label key that the selector applies to.
:param pulumi.Input[str] operator: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
:param pulumi.Input[Sequence[pulumi.Input[str]]] values: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "operator", operator)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
key is the label key that the selector applies to.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def operator(self) -> pulumi.Input[str]:
"""
operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: pulumi.Input[str]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
return pulumi.get(self, "values")
@values.setter
def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "values", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs:
def __init__(__self__, *,
topology_key: pulumi.Input[str],
label_selector: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorArgs']] = None,
namespaces: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running.
:param pulumi.Input[str] topology_key: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorArgs'] label_selector: A label query over a set of resources, in this case pods.
:param pulumi.Input[Sequence[pulumi.Input[str]]] namespaces: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace".
"""
pulumi.set(__self__, "topology_key", topology_key)
if label_selector is not None:
pulumi.set(__self__, "label_selector", label_selector)
if namespaces is not None:
pulumi.set(__self__, "namespaces", namespaces)
@property
@pulumi.getter(name="topologyKey")
def topology_key(self) -> pulumi.Input[str]:
"""
This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
"""
return pulumi.get(self, "topology_key")
@topology_key.setter
def topology_key(self, value: pulumi.Input[str]):
pulumi.set(self, "topology_key", value)
@property
@pulumi.getter(name="labelSelector")
def label_selector(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorArgs']]:
"""
A label query over a set of resources, in this case pods.
"""
return pulumi.get(self, "label_selector")
@label_selector.setter
def label_selector(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorArgs']]):
pulumi.set(self, "label_selector", value)
@property
@pulumi.getter
def namespaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace".
"""
return pulumi.get(self, "namespaces")
@namespaces.setter
def namespaces(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "namespaces", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorArgs:
def __init__(__self__, *,
match_expressions: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressionsArgs']]]] = None,
match_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
A label query over a set of resources, in this case pods.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressionsArgs']]] match_expressions: matchExpressions is a list of label selector requirements. The requirements are ANDed.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
if match_expressions is not None:
pulumi.set(__self__, "match_expressions", match_expressions)
if match_labels is not None:
pulumi.set(__self__, "match_labels", match_labels)
@property
@pulumi.getter(name="matchExpressions")
def match_expressions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressionsArgs']]]]:
"""
matchExpressions is a list of label selector requirements. The requirements are ANDed.
"""
return pulumi.get(self, "match_expressions")
@match_expressions.setter
def match_expressions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressionsArgs']]]]):
pulumi.set(self, "match_expressions", value)
@property
@pulumi.getter(name="matchLabels")
def match_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
return pulumi.get(self, "match_labels")
@match_labels.setter
def match_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "match_labels", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressionsArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
operator: pulumi.Input[str],
values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
:param pulumi.Input[str] key: key is the label key that the selector applies to.
:param pulumi.Input[str] operator: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
:param pulumi.Input[Sequence[pulumi.Input[str]]] values: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "operator", operator)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
key is the label key that the selector applies to.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def operator(self) -> pulumi.Input[str]:
"""
operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: pulumi.Input[str]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
return pulumi.get(self, "values")
@values.setter
def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "values", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityArgs:
def __init__(__self__, *,
preferred_during_scheduling_ignored_during_execution: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs']]]] = None,
required_during_scheduling_ignored_during_execution: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs']]]] = None):
"""
Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs']]] preferred_during_scheduling_ignored_during_execution: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs']]] required_during_scheduling_ignored_during_execution: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
"""
if preferred_during_scheduling_ignored_during_execution is not None:
pulumi.set(__self__, "preferred_during_scheduling_ignored_during_execution", preferred_during_scheduling_ignored_during_execution)
if required_during_scheduling_ignored_during_execution is not None:
pulumi.set(__self__, "required_during_scheduling_ignored_during_execution", required_during_scheduling_ignored_during_execution)
@property
@pulumi.getter(name="preferredDuringSchedulingIgnoredDuringExecution")
def preferred_during_scheduling_ignored_during_execution(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs']]]]:
"""
The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
"""
return pulumi.get(self, "preferred_during_scheduling_ignored_during_execution")
@preferred_during_scheduling_ignored_during_execution.setter
def preferred_during_scheduling_ignored_during_execution(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs']]]]):
pulumi.set(self, "preferred_during_scheduling_ignored_during_execution", value)
@property
@pulumi.getter(name="requiredDuringSchedulingIgnoredDuringExecution")
def required_during_scheduling_ignored_during_execution(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs']]]]:
"""
If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
"""
return pulumi.get(self, "required_during_scheduling_ignored_during_execution")
@required_during_scheduling_ignored_during_execution.setter
def required_during_scheduling_ignored_during_execution(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs']]]]):
pulumi.set(self, "required_during_scheduling_ignored_during_execution", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs:
def __init__(__self__, *,
pod_affinity_term: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermArgs'],
weight: pulumi.Input[int]):
"""
The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s).
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermArgs'] pod_affinity_term: Required. A pod affinity term, associated with the corresponding weight.
:param pulumi.Input[int] weight: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
"""
pulumi.set(__self__, "pod_affinity_term", pod_affinity_term)
pulumi.set(__self__, "weight", weight)
@property
@pulumi.getter(name="podAffinityTerm")
def pod_affinity_term(self) -> pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermArgs']:
"""
Required. A pod affinity term, associated with the corresponding weight.
"""
return pulumi.get(self, "pod_affinity_term")
@pod_affinity_term.setter
def pod_affinity_term(self, value: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermArgs']):
pulumi.set(self, "pod_affinity_term", value)
@property
@pulumi.getter
def weight(self) -> pulumi.Input[int]:
"""
weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
"""
return pulumi.get(self, "weight")
@weight.setter
def weight(self, value: pulumi.Input[int]):
pulumi.set(self, "weight", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermArgs:
def __init__(__self__, *,
topology_key: pulumi.Input[str],
label_selector: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorArgs']] = None,
namespaces: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Required. A pod affinity term, associated with the corresponding weight.
:param pulumi.Input[str] topology_key: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorArgs'] label_selector: A label query over a set of resources, in this case pods.
:param pulumi.Input[Sequence[pulumi.Input[str]]] namespaces: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
"""
pulumi.set(__self__, "topology_key", topology_key)
if label_selector is not None:
pulumi.set(__self__, "label_selector", label_selector)
if namespaces is not None:
pulumi.set(__self__, "namespaces", namespaces)
@property
@pulumi.getter(name="topologyKey")
def topology_key(self) -> pulumi.Input[str]:
"""
This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
"""
return pulumi.get(self, "topology_key")
@topology_key.setter
def topology_key(self, value: pulumi.Input[str]):
pulumi.set(self, "topology_key", value)
@property
@pulumi.getter(name="labelSelector")
def label_selector(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorArgs']]:
"""
A label query over a set of resources, in this case pods.
"""
return pulumi.get(self, "label_selector")
@label_selector.setter
def label_selector(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorArgs']]):
pulumi.set(self, "label_selector", value)
@property
@pulumi.getter
def namespaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
"""
return pulumi.get(self, "namespaces")
@namespaces.setter
def namespaces(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "namespaces", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorArgs:
def __init__(__self__, *,
match_expressions: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressionsArgs']]]] = None,
match_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
A label query over a set of resources, in this case pods.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressionsArgs']]] match_expressions: matchExpressions is a list of label selector requirements. The requirements are ANDed.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
if match_expressions is not None:
pulumi.set(__self__, "match_expressions", match_expressions)
if match_labels is not None:
pulumi.set(__self__, "match_labels", match_labels)
@property
@pulumi.getter(name="matchExpressions")
def match_expressions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressionsArgs']]]]:
"""
matchExpressions is a list of label selector requirements. The requirements are ANDed.
"""
return pulumi.get(self, "match_expressions")
@match_expressions.setter
def match_expressions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressionsArgs']]]]):
pulumi.set(self, "match_expressions", value)
@property
@pulumi.getter(name="matchLabels")
def match_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
return pulumi.get(self, "match_labels")
@match_labels.setter
def match_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "match_labels", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressionsArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
operator: pulumi.Input[str],
values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
:param pulumi.Input[str] key: key is the label key that the selector applies to.
:param pulumi.Input[str] operator: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
:param pulumi.Input[Sequence[pulumi.Input[str]]] values: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "operator", operator)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
key is the label key that the selector applies to.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def operator(self) -> pulumi.Input[str]:
"""
operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: pulumi.Input[str]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
return pulumi.get(self, "values")
@values.setter
def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "values", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs:
def __init__(__self__, *,
topology_key: pulumi.Input[str],
label_selector: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorArgs']] = None,
namespaces: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
:param pulumi.Input[str] topology_key: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorArgs'] label_selector: A label query over a set of resources, in this case pods.
:param pulumi.Input[Sequence[pulumi.Input[str]]] namespaces: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
"""
pulumi.set(__self__, "topology_key", topology_key)
if label_selector is not None:
pulumi.set(__self__, "label_selector", label_selector)
if namespaces is not None:
pulumi.set(__self__, "namespaces", namespaces)
@property
@pulumi.getter(name="topologyKey")
def topology_key(self) -> pulumi.Input[str]:
"""
This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
"""
return pulumi.get(self, "topology_key")
@topology_key.setter
def topology_key(self, value: pulumi.Input[str]):
pulumi.set(self, "topology_key", value)
@property
@pulumi.getter(name="labelSelector")
def label_selector(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorArgs']]:
"""
A label query over a set of resources, in this case pods.
"""
return pulumi.get(self, "label_selector")
@label_selector.setter
def label_selector(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorArgs']]):
pulumi.set(self, "label_selector", value)
@property
@pulumi.getter
def namespaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
"""
return pulumi.get(self, "namespaces")
@namespaces.setter
def namespaces(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "namespaces", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorArgs:
def __init__(__self__, *,
match_expressions: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressionsArgs']]]] = None,
match_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
A label query over a set of resources, in this case pods.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressionsArgs']]] match_expressions: matchExpressions is a list of label selector requirements. The requirements are ANDed.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
if match_expressions is not None:
pulumi.set(__self__, "match_expressions", match_expressions)
if match_labels is not None:
pulumi.set(__self__, "match_labels", match_labels)
@property
@pulumi.getter(name="matchExpressions")
def match_expressions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressionsArgs']]]]:
"""
matchExpressions is a list of label selector requirements. The requirements are ANDed.
"""
return pulumi.get(self, "match_expressions")
@match_expressions.setter
def match_expressions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressionsArgs']]]]):
pulumi.set(self, "match_expressions", value)
@property
@pulumi.getter(name="matchLabels")
def match_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
return pulumi.get(self, "match_labels")
@match_labels.setter
def match_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "match_labels", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressionsArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
operator: pulumi.Input[str],
values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
:param pulumi.Input[str] key: key is the label key that the selector applies to.
:param pulumi.Input[str] operator: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
:param pulumi.Input[Sequence[pulumi.Input[str]]] values: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "operator", operator)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
key is the label key that the selector applies to.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def operator(self) -> pulumi.Input[str]:
"""
operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: pulumi.Input[str]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
return pulumi.get(self, "values")
@values.setter
def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "values", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
args: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
command: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
env: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvArgs']]]] = None,
env_from: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvFromArgs']]]] = None,
image: Optional[pulumi.Input[str]] = None,
image_pull_policy: Optional[pulumi.Input[str]] = None,
lifecycle: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecycleArgs']] = None,
liveness_probe: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLivenessProbeArgs']] = None,
ports: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersPortsArgs']]]] = None,
readiness_probe: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersReadinessProbeArgs']] = None,
resources: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersResourcesArgs']] = None,
security_context: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersSecurityContextArgs']] = None,
startup_probe: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeArgs']] = None,
stdin: Optional[pulumi.Input[bool]] = None,
stdin_once: Optional[pulumi.Input[bool]] = None,
termination_message_path: Optional[pulumi.Input[str]] = None,
termination_message_policy: Optional[pulumi.Input[str]] = None,
tty: Optional[pulumi.Input[bool]] = None,
volume_devices: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersVolumeDevicesArgs']]]] = None,
volume_mounts: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersVolumeMountsArgs']]]] = None,
working_dir: Optional[pulumi.Input[str]] = None):
"""
A single application container that you want to run within a pod.
:param pulumi.Input[str] name: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
:param pulumi.Input[Sequence[pulumi.Input[str]]] args: Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvArgs']]] env: List of environment variables to set in the container. Cannot be updated.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvFromArgs']]] env_from: List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.
:param pulumi.Input[str] image: Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.
:param pulumi.Input[str] image_pull_policy: Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecycleArgs'] lifecycle: Actions that the management system should take in response to container lifecycle events. Cannot be updated.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLivenessProbeArgs'] liveness_probe: Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersPortsArgs']]] ports: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersReadinessProbeArgs'] readiness_probe: Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersResourcesArgs'] resources: Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersSecurityContextArgs'] security_context: Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeArgs'] startup_probe: StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. This is a beta feature enabled by the StartupProbe feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input[bool] stdin: Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.
:param pulumi.Input[bool] stdin_once: Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, container processes that read from stdin will never receive an EOF. Default is false.
:param pulumi.Input[str] termination_message_path: Optional: Path, within the container's filesystem, of the file to which the container's termination message will be written. The message is intended to be a brief final status, such as an assertion failure message. It will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.
:param pulumi.Input[str] termination_message_policy: Indicates how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.
:param pulumi.Input[bool] tty: Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersVolumeDevicesArgs']]] volume_devices: volumeDevices is the list of block devices to be used by the container.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersVolumeMountsArgs']]] volume_mounts: Pod volumes to mount into the container's filesystem. Cannot be updated.
:param pulumi.Input[str] working_dir: Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.
"""
pulumi.set(__self__, "name", name)
if args is not None:
pulumi.set(__self__, "args", args)
if command is not None:
pulumi.set(__self__, "command", command)
if env is not None:
pulumi.set(__self__, "env", env)
if env_from is not None:
pulumi.set(__self__, "env_from", env_from)
if image is not None:
pulumi.set(__self__, "image", image)
if image_pull_policy is not None:
pulumi.set(__self__, "image_pull_policy", image_pull_policy)
if lifecycle is not None:
pulumi.set(__self__, "lifecycle", lifecycle)
if liveness_probe is not None:
pulumi.set(__self__, "liveness_probe", liveness_probe)
if ports is not None:
pulumi.set(__self__, "ports", ports)
if readiness_probe is not None:
pulumi.set(__self__, "readiness_probe", readiness_probe)
if resources is not None:
pulumi.set(__self__, "resources", resources)
if security_context is not None:
pulumi.set(__self__, "security_context", security_context)
if startup_probe is not None:
pulumi.set(__self__, "startup_probe", startup_probe)
if stdin is not None:
pulumi.set(__self__, "stdin", stdin)
if stdin_once is not None:
pulumi.set(__self__, "stdin_once", stdin_once)
if termination_message_path is not None:
pulumi.set(__self__, "termination_message_path", termination_message_path)
if termination_message_policy is not None:
pulumi.set(__self__, "termination_message_policy", termination_message_policy)
if tty is not None:
pulumi.set(__self__, "tty", tty)
if volume_devices is not None:
pulumi.set(__self__, "volume_devices", volume_devices)
if volume_mounts is not None:
pulumi.set(__self__, "volume_mounts", volume_mounts)
if working_dir is not None:
pulumi.set(__self__, "working_dir", working_dir)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def args(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
"""
return pulumi.get(self, "args")
@args.setter
def args(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "args", value)
@property
@pulumi.getter
def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "command", value)
@property
@pulumi.getter
def env(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvArgs']]]]:
"""
List of environment variables to set in the container. Cannot be updated.
"""
return pulumi.get(self, "env")
@env.setter
def env(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvArgs']]]]):
pulumi.set(self, "env", value)
@property
@pulumi.getter(name="envFrom")
def env_from(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvFromArgs']]]]:
"""
List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.
"""
return pulumi.get(self, "env_from")
@env_from.setter
def env_from(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvFromArgs']]]]):
pulumi.set(self, "env_from", value)
@property
@pulumi.getter
def image(self) -> Optional[pulumi.Input[str]]:
"""
Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.
"""
return pulumi.get(self, "image")
@image.setter
def image(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image", value)
@property
@pulumi.getter(name="imagePullPolicy")
def image_pull_policy(self) -> Optional[pulumi.Input[str]]:
"""
Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
"""
return pulumi.get(self, "image_pull_policy")
@image_pull_policy.setter
def image_pull_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image_pull_policy", value)
@property
@pulumi.getter
def lifecycle(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecycleArgs']]:
"""
Actions that the management system should take in response to container lifecycle events. Cannot be updated.
"""
return pulumi.get(self, "lifecycle")
@lifecycle.setter
def lifecycle(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecycleArgs']]):
pulumi.set(self, "lifecycle", value)
@property
@pulumi.getter(name="livenessProbe")
def liveness_probe(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLivenessProbeArgs']]:
"""
Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "liveness_probe")
@liveness_probe.setter
def liveness_probe(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLivenessProbeArgs']]):
pulumi.set(self, "liveness_probe", value)
@property
@pulumi.getter
def ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersPortsArgs']]]]:
"""
List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
"""
return pulumi.get(self, "ports")
@ports.setter
def ports(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersPortsArgs']]]]):
pulumi.set(self, "ports", value)
@property
@pulumi.getter(name="readinessProbe")
def readiness_probe(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersReadinessProbeArgs']]:
"""
Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "readiness_probe")
@readiness_probe.setter
def readiness_probe(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersReadinessProbeArgs']]):
pulumi.set(self, "readiness_probe", value)
@property
@pulumi.getter
def resources(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersResourcesArgs']]:
"""
Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
return pulumi.get(self, "resources")
@resources.setter
def resources(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersResourcesArgs']]):
pulumi.set(self, "resources", value)
@property
@pulumi.getter(name="securityContext")
def security_context(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersSecurityContextArgs']]:
"""
Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
"""
return pulumi.get(self, "security_context")
@security_context.setter
def security_context(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersSecurityContextArgs']]):
pulumi.set(self, "security_context", value)
@property
@pulumi.getter(name="startupProbe")
def startup_probe(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeArgs']]:
"""
StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. This is a beta feature enabled by the StartupProbe feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "startup_probe")
@startup_probe.setter
def startup_probe(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeArgs']]):
pulumi.set(self, "startup_probe", value)
@property
@pulumi.getter
def stdin(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.
"""
return pulumi.get(self, "stdin")
@stdin.setter
def stdin(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "stdin", value)
@property
@pulumi.getter(name="stdinOnce")
def stdin_once(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, container processes that read from stdin will never receive an EOF. Default is false.
"""
return pulumi.get(self, "stdin_once")
@stdin_once.setter
def stdin_once(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "stdin_once", value)
@property
@pulumi.getter(name="terminationMessagePath")
def termination_message_path(self) -> Optional[pulumi.Input[str]]:
"""
Optional: Path, within the container's filesystem, of the file to which the container's termination message will be written. The message is intended to be a brief final status, such as an assertion failure message. It will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.
"""
return pulumi.get(self, "termination_message_path")
@termination_message_path.setter
def termination_message_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "termination_message_path", value)
@property
@pulumi.getter(name="terminationMessagePolicy")
def termination_message_policy(self) -> Optional[pulumi.Input[str]]:
"""
Indicates how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.
"""
return pulumi.get(self, "termination_message_policy")
@termination_message_policy.setter
def termination_message_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "termination_message_policy", value)
@property
@pulumi.getter
def tty(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.
"""
return pulumi.get(self, "tty")
@tty.setter
def tty(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "tty", value)
@property
@pulumi.getter(name="volumeDevices")
def volume_devices(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersVolumeDevicesArgs']]]]:
"""
volumeDevices is the list of block devices to be used by the container.
"""
return pulumi.get(self, "volume_devices")
@volume_devices.setter
def volume_devices(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersVolumeDevicesArgs']]]]):
pulumi.set(self, "volume_devices", value)
@property
@pulumi.getter(name="volumeMounts")
def volume_mounts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersVolumeMountsArgs']]]]:
"""
Pod volumes to mount into the container's filesystem. Cannot be updated.
"""
return pulumi.get(self, "volume_mounts")
@volume_mounts.setter
def volume_mounts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersVolumeMountsArgs']]]]):
pulumi.set(self, "volume_mounts", value)
@property
@pulumi.getter(name="workingDir")
def working_dir(self) -> Optional[pulumi.Input[str]]:
"""
Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.
"""
return pulumi.get(self, "working_dir")
@working_dir.setter
def working_dir(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "working_dir", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: Optional[pulumi.Input[str]] = None,
value_from: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvValueFromArgs']] = None):
"""
EnvVar represents an environment variable present in a Container.
:param pulumi.Input[str] name: Name of the environment variable. Must be a C_IDENTIFIER.
:param pulumi.Input[str] value: Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvValueFromArgs'] value_from: Source for the environment variable's value. Cannot be used if value is not empty.
"""
pulumi.set(__self__, "name", name)
if value is not None:
pulumi.set(__self__, "value", value)
if value_from is not None:
pulumi.set(__self__, "value_from", value_from)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Name of the environment variable. Must be a C_IDENTIFIER.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
@property
@pulumi.getter(name="valueFrom")
def value_from(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvValueFromArgs']]:
"""
Source for the environment variable's value. Cannot be used if value is not empty.
"""
return pulumi.get(self, "value_from")
@value_from.setter
def value_from(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvValueFromArgs']]):
pulumi.set(self, "value_from", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvFromArgs:
def __init__(__self__, *,
config_map_ref: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvFromConfigMapRefArgs']] = None,
prefix: Optional[pulumi.Input[str]] = None,
secret_ref: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvFromSecretRefArgs']] = None):
"""
EnvFromSource represents the source of a set of ConfigMaps
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvFromConfigMapRefArgs'] config_map_ref: The ConfigMap to select from
:param pulumi.Input[str] prefix: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvFromSecretRefArgs'] secret_ref: The Secret to select from
"""
if config_map_ref is not None:
pulumi.set(__self__, "config_map_ref", config_map_ref)
if prefix is not None:
pulumi.set(__self__, "prefix", prefix)
if secret_ref is not None:
pulumi.set(__self__, "secret_ref", secret_ref)
@property
@pulumi.getter(name="configMapRef")
def config_map_ref(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvFromConfigMapRefArgs']]:
"""
The ConfigMap to select from
"""
return pulumi.get(self, "config_map_ref")
@config_map_ref.setter
def config_map_ref(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvFromConfigMapRefArgs']]):
pulumi.set(self, "config_map_ref", value)
@property
@pulumi.getter
def prefix(self) -> Optional[pulumi.Input[str]]:
"""
An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
"""
return pulumi.get(self, "prefix")
@prefix.setter
def prefix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "prefix", value)
@property
@pulumi.getter(name="secretRef")
def secret_ref(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvFromSecretRefArgs']]:
"""
The Secret to select from
"""
return pulumi.get(self, "secret_ref")
@secret_ref.setter
def secret_ref(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvFromSecretRefArgs']]):
pulumi.set(self, "secret_ref", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvFromConfigMapRefArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
optional: Optional[pulumi.Input[bool]] = None):
"""
The ConfigMap to select from
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
:param pulumi.Input[bool] optional: Specify whether the ConfigMap must be defined
"""
if name is not None:
pulumi.set(__self__, "name", name)
if optional is not None:
pulumi.set(__self__, "optional", optional)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def optional(self) -> Optional[pulumi.Input[bool]]:
"""
Specify whether the ConfigMap must be defined
"""
return pulumi.get(self, "optional")
@optional.setter
def optional(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "optional", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvFromSecretRefArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
optional: Optional[pulumi.Input[bool]] = None):
"""
The Secret to select from
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
:param pulumi.Input[bool] optional: Specify whether the Secret must be defined
"""
if name is not None:
pulumi.set(__self__, "name", name)
if optional is not None:
pulumi.set(__self__, "optional", optional)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def optional(self) -> Optional[pulumi.Input[bool]]:
"""
Specify whether the Secret must be defined
"""
return pulumi.get(self, "optional")
@optional.setter
def optional(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "optional", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvValueFromArgs:
def __init__(__self__, *,
config_map_key_ref: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvValueFromConfigMapKeyRefArgs']] = None,
field_ref: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvValueFromFieldRefArgs']] = None,
resource_field_ref: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvValueFromResourceFieldRefArgs']] = None,
secret_key_ref: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvValueFromSecretKeyRefArgs']] = None):
"""
Source for the environment variable's value. Cannot be used if value is not empty.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvValueFromConfigMapKeyRefArgs'] config_map_key_ref: Selects a key of a ConfigMap.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvValueFromFieldRefArgs'] field_ref: Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvValueFromResourceFieldRefArgs'] resource_field_ref: Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvValueFromSecretKeyRefArgs'] secret_key_ref: Selects a key of a secret in the pod's namespace
"""
if config_map_key_ref is not None:
pulumi.set(__self__, "config_map_key_ref", config_map_key_ref)
if field_ref is not None:
pulumi.set(__self__, "field_ref", field_ref)
if resource_field_ref is not None:
pulumi.set(__self__, "resource_field_ref", resource_field_ref)
if secret_key_ref is not None:
pulumi.set(__self__, "secret_key_ref", secret_key_ref)
@property
@pulumi.getter(name="configMapKeyRef")
def config_map_key_ref(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvValueFromConfigMapKeyRefArgs']]:
"""
Selects a key of a ConfigMap.
"""
return pulumi.get(self, "config_map_key_ref")
@config_map_key_ref.setter
def config_map_key_ref(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvValueFromConfigMapKeyRefArgs']]):
pulumi.set(self, "config_map_key_ref", value)
@property
@pulumi.getter(name="fieldRef")
def field_ref(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvValueFromFieldRefArgs']]:
"""
Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
"""
return pulumi.get(self, "field_ref")
@field_ref.setter
def field_ref(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvValueFromFieldRefArgs']]):
pulumi.set(self, "field_ref", value)
@property
@pulumi.getter(name="resourceFieldRef")
def resource_field_ref(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvValueFromResourceFieldRefArgs']]:
"""
Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
"""
return pulumi.get(self, "resource_field_ref")
@resource_field_ref.setter
def resource_field_ref(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvValueFromResourceFieldRefArgs']]):
pulumi.set(self, "resource_field_ref", value)
@property
@pulumi.getter(name="secretKeyRef")
def secret_key_ref(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvValueFromSecretKeyRefArgs']]:
"""
Selects a key of a secret in the pod's namespace
"""
return pulumi.get(self, "secret_key_ref")
@secret_key_ref.setter
def secret_key_ref(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvValueFromSecretKeyRefArgs']]):
pulumi.set(self, "secret_key_ref", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvValueFromConfigMapKeyRefArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
name: Optional[pulumi.Input[str]] = None,
optional: Optional[pulumi.Input[bool]] = None):
"""
Selects a key of a ConfigMap.
:param pulumi.Input[str] key: The key to select.
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
:param pulumi.Input[bool] optional: Specify whether the ConfigMap or its key must be defined
"""
pulumi.set(__self__, "key", key)
if name is not None:
pulumi.set(__self__, "name", name)
if optional is not None:
pulumi.set(__self__, "optional", optional)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The key to select.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def optional(self) -> Optional[pulumi.Input[bool]]:
"""
Specify whether the ConfigMap or its key must be defined
"""
return pulumi.get(self, "optional")
@optional.setter
def optional(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "optional", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvValueFromFieldRefArgs:
def __init__(__self__, *,
field_path: pulumi.Input[str],
api_version: Optional[pulumi.Input[str]] = None):
"""
Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
:param pulumi.Input[str] field_path: Path of the field to select in the specified API version.
:param pulumi.Input[str] api_version: Version of the schema the FieldPath is written in terms of, defaults to "v1".
"""
pulumi.set(__self__, "field_path", field_path)
if api_version is not None:
pulumi.set(__self__, "api_version", api_version)
@property
@pulumi.getter(name="fieldPath")
def field_path(self) -> pulumi.Input[str]:
"""
Path of the field to select in the specified API version.
"""
return pulumi.get(self, "field_path")
@field_path.setter
def field_path(self, value: pulumi.Input[str]):
pulumi.set(self, "field_path", value)
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> Optional[pulumi.Input[str]]:
"""
Version of the schema the FieldPath is written in terms of, defaults to "v1".
"""
return pulumi.get(self, "api_version")
@api_version.setter
def api_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_version", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvValueFromResourceFieldRefArgs:
def __init__(__self__, *,
resource: pulumi.Input[str],
container_name: Optional[pulumi.Input[str]] = None,
divisor: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvValueFromResourceFieldRefDivisorArgs']] = None):
"""
Selects a resource of the container: only resource limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
:param pulumi.Input[str] resource: Required: resource to select
:param pulumi.Input[str] container_name: Container name: required for volumes, optional for env vars
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvValueFromResourceFieldRefDivisorArgs'] divisor: Specifies the output format of the exposed resources, defaults to "1"
"""
pulumi.set(__self__, "resource", resource)
if container_name is not None:
pulumi.set(__self__, "container_name", container_name)
if divisor is not None:
pulumi.set(__self__, "divisor", divisor)
@property
@pulumi.getter
def resource(self) -> pulumi.Input[str]:
"""
Required: resource to select
"""
return pulumi.get(self, "resource")
@resource.setter
def resource(self, value: pulumi.Input[str]):
pulumi.set(self, "resource", value)
@property
@pulumi.getter(name="containerName")
def container_name(self) -> Optional[pulumi.Input[str]]:
"""
Container name: required for volumes, optional for env vars
"""
return pulumi.get(self, "container_name")
@container_name.setter
def container_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "container_name", value)
@property
@pulumi.getter
def divisor(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvValueFromResourceFieldRefDivisorArgs']]:
"""
Specifies the output format of the exposed resources, defaults to "1"
"""
return pulumi.get(self, "divisor")
@divisor.setter
def divisor(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvValueFromResourceFieldRefDivisorArgs']]):
pulumi.set(self, "divisor", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvValueFromResourceFieldRefDivisorArgs:
def __init__(__self__):
pass
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersEnvValueFromSecretKeyRefArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
name: Optional[pulumi.Input[str]] = None,
optional: Optional[pulumi.Input[bool]] = None):
"""
Selects a key of a secret in the pod's namespace
:param pulumi.Input[str] key: The key of the secret to select from. Must be a valid secret key.
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
:param pulumi.Input[bool] optional: Specify whether the Secret or its key must be defined
"""
pulumi.set(__self__, "key", key)
if name is not None:
pulumi.set(__self__, "name", name)
if optional is not None:
pulumi.set(__self__, "optional", optional)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The key of the secret to select from. Must be a valid secret key.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def optional(self) -> Optional[pulumi.Input[bool]]:
"""
Specify whether the Secret or its key must be defined
"""
return pulumi.get(self, "optional")
@optional.setter
def optional(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "optional", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecycleArgs:
def __init__(__self__, *,
post_start: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartArgs']] = None,
pre_stop: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePreStopArgs']] = None):
"""
Actions that the management system should take in response to container lifecycle events. Cannot be updated.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartArgs'] post_start: PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePreStopArgs'] pre_stop: PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
"""
if post_start is not None:
pulumi.set(__self__, "post_start", post_start)
if pre_stop is not None:
pulumi.set(__self__, "pre_stop", pre_stop)
@property
@pulumi.getter(name="postStart")
def post_start(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartArgs']]:
"""
PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
"""
return pulumi.get(self, "post_start")
@post_start.setter
def post_start(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartArgs']]):
pulumi.set(self, "post_start", value)
@property
@pulumi.getter(name="preStop")
def pre_stop(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePreStopArgs']]:
"""
PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
"""
return pulumi.get(self, "pre_stop")
@pre_stop.setter
def pre_stop(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePreStopArgs']]):
pulumi.set(self, "pre_stop", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartArgs:
def __init__(__self__, *,
exec_: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartExecArgs']] = None,
http_get: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartHttpGetArgs']] = None,
tcp_socket: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartTcpSocketArgs']] = None):
"""
PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartExecArgs'] exec_: One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartHttpGetArgs'] http_get: HTTPGet specifies the http request to perform.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartTcpSocketArgs'] tcp_socket: TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
"""
if exec_ is not None:
pulumi.set(__self__, "exec_", exec_)
if http_get is not None:
pulumi.set(__self__, "http_get", http_get)
if tcp_socket is not None:
pulumi.set(__self__, "tcp_socket", tcp_socket)
@property
@pulumi.getter(name="exec")
def exec_(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartExecArgs']]:
"""
One and only one of the following should be specified. Exec specifies the action to take.
"""
return pulumi.get(self, "exec_")
@exec_.setter
def exec_(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartExecArgs']]):
pulumi.set(self, "exec_", value)
@property
@pulumi.getter(name="httpGet")
def http_get(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartHttpGetArgs']]:
"""
HTTPGet specifies the http request to perform.
"""
return pulumi.get(self, "http_get")
@http_get.setter
def http_get(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartHttpGetArgs']]):
pulumi.set(self, "http_get", value)
@property
@pulumi.getter(name="tcpSocket")
def tcp_socket(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartTcpSocketArgs']]:
"""
TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
"""
return pulumi.get(self, "tcp_socket")
@tcp_socket.setter
def tcp_socket(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartTcpSocketArgs']]):
pulumi.set(self, "tcp_socket", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartExecArgs:
def __init__(__self__, *,
command: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
if command is not None:
pulumi.set(__self__, "command", command)
@property
@pulumi.getter
def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "command", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartHttpGetArgs:
def __init__(__self__, *,
port: pulumi.Input[Union[int, str]],
host: Optional[pulumi.Input[str]] = None,
http_headers: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartHttpGetHttpHeadersArgs']]]] = None,
path: Optional[pulumi.Input[str]] = None,
scheme: Optional[pulumi.Input[str]] = None):
"""
HTTPGet specifies the http request to perform.
:param pulumi.Input[Union[int, str]] port: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartHttpGetHttpHeadersArgs']]] http_headers: Custom headers to set in the request. HTTP allows repeated headers.
:param pulumi.Input[str] path: Path to access on the HTTP server.
:param pulumi.Input[str] scheme: Scheme to use for connecting to the host. Defaults to HTTP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
if http_headers is not None:
pulumi.set(__self__, "http_headers", http_headers)
if path is not None:
pulumi.set(__self__, "path", path)
if scheme is not None:
pulumi.set(__self__, "scheme", scheme)
@property
@pulumi.getter
def port(self) -> pulumi.Input[Union[int, str]]:
"""
Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input[Union[int, str]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@property
@pulumi.getter(name="httpHeaders")
def http_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartHttpGetHttpHeadersArgs']]]]:
"""
Custom headers to set in the request. HTTP allows repeated headers.
"""
return pulumi.get(self, "http_headers")
@http_headers.setter
def http_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartHttpGetHttpHeadersArgs']]]]):
pulumi.set(self, "http_headers", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
Path to access on the HTTP server.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def scheme(self) -> Optional[pulumi.Input[str]]:
"""
Scheme to use for connecting to the host. Defaults to HTTP.
"""
return pulumi.get(self, "scheme")
@scheme.setter
def scheme(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scheme", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartHttpGetHttpHeadersArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
"""
HTTPHeader describes a custom header to be used in HTTP probes
:param pulumi.Input[str] name: The header field name
:param pulumi.Input[str] value: The header field value
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The header field name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The header field value
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartTcpSocketArgs:
def __init__(__self__, *,
port: pulumi.Input[Union[int, str]],
host: Optional[pulumi.Input[str]] = None):
"""
TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
:param pulumi.Input[Union[int, str]] port: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Optional: Host name to connect to, defaults to the pod IP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
@property
@pulumi.getter
def port(self) -> pulumi.Input[Union[int, str]]:
"""
Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input[Union[int, str]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Optional: Host name to connect to, defaults to the pod IP.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePreStopArgs:
def __init__(__self__, *,
exec_: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePreStopExecArgs']] = None,
http_get: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePreStopHttpGetArgs']] = None,
tcp_socket: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePreStopTcpSocketArgs']] = None):
"""
PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePreStopExecArgs'] exec_: One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePreStopHttpGetArgs'] http_get: HTTPGet specifies the http request to perform.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePreStopTcpSocketArgs'] tcp_socket: TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
"""
if exec_ is not None:
pulumi.set(__self__, "exec_", exec_)
if http_get is not None:
pulumi.set(__self__, "http_get", http_get)
if tcp_socket is not None:
pulumi.set(__self__, "tcp_socket", tcp_socket)
@property
@pulumi.getter(name="exec")
def exec_(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePreStopExecArgs']]:
"""
One and only one of the following should be specified. Exec specifies the action to take.
"""
return pulumi.get(self, "exec_")
@exec_.setter
def exec_(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePreStopExecArgs']]):
pulumi.set(self, "exec_", value)
@property
@pulumi.getter(name="httpGet")
def http_get(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePreStopHttpGetArgs']]:
"""
HTTPGet specifies the http request to perform.
"""
return pulumi.get(self, "http_get")
@http_get.setter
def http_get(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePreStopHttpGetArgs']]):
pulumi.set(self, "http_get", value)
@property
@pulumi.getter(name="tcpSocket")
def tcp_socket(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePreStopTcpSocketArgs']]:
"""
TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
"""
return pulumi.get(self, "tcp_socket")
@tcp_socket.setter
def tcp_socket(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePreStopTcpSocketArgs']]):
pulumi.set(self, "tcp_socket", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePreStopExecArgs:
def __init__(__self__, *,
command: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
if command is not None:
pulumi.set(__self__, "command", command)
@property
@pulumi.getter
def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "command", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePreStopHttpGetArgs:
def __init__(__self__, *,
port: pulumi.Input[Union[int, str]],
host: Optional[pulumi.Input[str]] = None,
http_headers: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePreStopHttpGetHttpHeadersArgs']]]] = None,
path: Optional[pulumi.Input[str]] = None,
scheme: Optional[pulumi.Input[str]] = None):
"""
HTTPGet specifies the http request to perform.
:param pulumi.Input[Union[int, str]] port: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePreStopHttpGetHttpHeadersArgs']]] http_headers: Custom headers to set in the request. HTTP allows repeated headers.
:param pulumi.Input[str] path: Path to access on the HTTP server.
:param pulumi.Input[str] scheme: Scheme to use for connecting to the host. Defaults to HTTP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
if http_headers is not None:
pulumi.set(__self__, "http_headers", http_headers)
if path is not None:
pulumi.set(__self__, "path", path)
if scheme is not None:
pulumi.set(__self__, "scheme", scheme)
@property
@pulumi.getter
def port(self) -> pulumi.Input[Union[int, str]]:
"""
Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input[Union[int, str]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@property
@pulumi.getter(name="httpHeaders")
def http_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePreStopHttpGetHttpHeadersArgs']]]]:
"""
Custom headers to set in the request. HTTP allows repeated headers.
"""
return pulumi.get(self, "http_headers")
@http_headers.setter
def http_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePreStopHttpGetHttpHeadersArgs']]]]):
pulumi.set(self, "http_headers", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
Path to access on the HTTP server.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def scheme(self) -> Optional[pulumi.Input[str]]:
"""
Scheme to use for connecting to the host. Defaults to HTTP.
"""
return pulumi.get(self, "scheme")
@scheme.setter
def scheme(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scheme", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePreStopHttpGetHttpHeadersArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
"""
HTTPHeader describes a custom header to be used in HTTP probes
:param pulumi.Input[str] name: The header field name
:param pulumi.Input[str] value: The header field value
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The header field name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The header field value
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePreStopTcpSocketArgs:
def __init__(__self__, *,
port: pulumi.Input[Union[int, str]],
host: Optional[pulumi.Input[str]] = None):
"""
TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
:param pulumi.Input[Union[int, str]] port: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Optional: Host name to connect to, defaults to the pod IP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
@property
@pulumi.getter
def port(self) -> pulumi.Input[Union[int, str]]:
"""
Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input[Union[int, str]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Optional: Host name to connect to, defaults to the pod IP.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLivenessProbeArgs:
def __init__(__self__, *,
exec_: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLivenessProbeExecArgs']] = None,
failure_threshold: Optional[pulumi.Input[int]] = None,
http_get: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLivenessProbeHttpGetArgs']] = None,
initial_delay_seconds: Optional[pulumi.Input[int]] = None,
period_seconds: Optional[pulumi.Input[int]] = None,
success_threshold: Optional[pulumi.Input[int]] = None,
tcp_socket: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLivenessProbeTcpSocketArgs']] = None,
timeout_seconds: Optional[pulumi.Input[int]] = None):
"""
Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLivenessProbeExecArgs'] exec_: One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[int] failure_threshold: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLivenessProbeHttpGetArgs'] http_get: HTTPGet specifies the http request to perform.
:param pulumi.Input[int] initial_delay_seconds: Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input[int] period_seconds: How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
:param pulumi.Input[int] success_threshold: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLivenessProbeTcpSocketArgs'] tcp_socket: TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
:param pulumi.Input[int] timeout_seconds: Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
if exec_ is not None:
pulumi.set(__self__, "exec_", exec_)
if failure_threshold is not None:
pulumi.set(__self__, "failure_threshold", failure_threshold)
if http_get is not None:
pulumi.set(__self__, "http_get", http_get)
if initial_delay_seconds is not None:
pulumi.set(__self__, "initial_delay_seconds", initial_delay_seconds)
if period_seconds is not None:
pulumi.set(__self__, "period_seconds", period_seconds)
if success_threshold is not None:
pulumi.set(__self__, "success_threshold", success_threshold)
if tcp_socket is not None:
pulumi.set(__self__, "tcp_socket", tcp_socket)
if timeout_seconds is not None:
pulumi.set(__self__, "timeout_seconds", timeout_seconds)
@property
@pulumi.getter(name="exec")
def exec_(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLivenessProbeExecArgs']]:
"""
One and only one of the following should be specified. Exec specifies the action to take.
"""
return pulumi.get(self, "exec_")
@exec_.setter
def exec_(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLivenessProbeExecArgs']]):
pulumi.set(self, "exec_", value)
@property
@pulumi.getter(name="failureThreshold")
def failure_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
"""
return pulumi.get(self, "failure_threshold")
@failure_threshold.setter
def failure_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "failure_threshold", value)
@property
@pulumi.getter(name="httpGet")
def http_get(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLivenessProbeHttpGetArgs']]:
"""
HTTPGet specifies the http request to perform.
"""
return pulumi.get(self, "http_get")
@http_get.setter
def http_get(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLivenessProbeHttpGetArgs']]):
pulumi.set(self, "http_get", value)
@property
@pulumi.getter(name="initialDelaySeconds")
def initial_delay_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "initial_delay_seconds")
@initial_delay_seconds.setter
def initial_delay_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "initial_delay_seconds", value)
@property
@pulumi.getter(name="periodSeconds")
def period_seconds(self) -> Optional[pulumi.Input[int]]:
"""
How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
"""
return pulumi.get(self, "period_seconds")
@period_seconds.setter
def period_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "period_seconds", value)
@property
@pulumi.getter(name="successThreshold")
def success_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
"""
return pulumi.get(self, "success_threshold")
@success_threshold.setter
def success_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "success_threshold", value)
@property
@pulumi.getter(name="tcpSocket")
def tcp_socket(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLivenessProbeTcpSocketArgs']]:
"""
TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
"""
return pulumi.get(self, "tcp_socket")
@tcp_socket.setter
def tcp_socket(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLivenessProbeTcpSocketArgs']]):
pulumi.set(self, "tcp_socket", value)
@property
@pulumi.getter(name="timeoutSeconds")
def timeout_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "timeout_seconds")
@timeout_seconds.setter
def timeout_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "timeout_seconds", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLivenessProbeExecArgs:
def __init__(__self__, *,
command: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
if command is not None:
pulumi.set(__self__, "command", command)
@property
@pulumi.getter
def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "command", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLivenessProbeHttpGetArgs:
def __init__(__self__, *,
port: pulumi.Input[Union[int, str]],
host: Optional[pulumi.Input[str]] = None,
http_headers: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLivenessProbeHttpGetHttpHeadersArgs']]]] = None,
path: Optional[pulumi.Input[str]] = None,
scheme: Optional[pulumi.Input[str]] = None):
"""
HTTPGet specifies the http request to perform.
:param pulumi.Input[Union[int, str]] port: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLivenessProbeHttpGetHttpHeadersArgs']]] http_headers: Custom headers to set in the request. HTTP allows repeated headers.
:param pulumi.Input[str] path: Path to access on the HTTP server.
:param pulumi.Input[str] scheme: Scheme to use for connecting to the host. Defaults to HTTP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
if http_headers is not None:
pulumi.set(__self__, "http_headers", http_headers)
if path is not None:
pulumi.set(__self__, "path", path)
if scheme is not None:
pulumi.set(__self__, "scheme", scheme)
@property
@pulumi.getter
def port(self) -> pulumi.Input[Union[int, str]]:
"""
Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input[Union[int, str]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@property
@pulumi.getter(name="httpHeaders")
def http_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLivenessProbeHttpGetHttpHeadersArgs']]]]:
"""
Custom headers to set in the request. HTTP allows repeated headers.
"""
return pulumi.get(self, "http_headers")
@http_headers.setter
def http_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLivenessProbeHttpGetHttpHeadersArgs']]]]):
pulumi.set(self, "http_headers", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
Path to access on the HTTP server.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def scheme(self) -> Optional[pulumi.Input[str]]:
"""
Scheme to use for connecting to the host. Defaults to HTTP.
"""
return pulumi.get(self, "scheme")
@scheme.setter
def scheme(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scheme", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLivenessProbeHttpGetHttpHeadersArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
"""
HTTPHeader describes a custom header to be used in HTTP probes
:param pulumi.Input[str] name: The header field name
:param pulumi.Input[str] value: The header field value
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The header field name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The header field value
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLivenessProbeTcpSocketArgs:
def __init__(__self__, *,
port: pulumi.Input[Union[int, str]],
host: Optional[pulumi.Input[str]] = None):
"""
TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
:param pulumi.Input[Union[int, str]] port: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Optional: Host name to connect to, defaults to the pod IP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
@property
@pulumi.getter
def port(self) -> pulumi.Input[Union[int, str]]:
"""
Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input[Union[int, str]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Optional: Host name to connect to, defaults to the pod IP.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersPortsArgs:
def __init__(__self__, *,
container_port: pulumi.Input[int],
protocol: pulumi.Input[str],
host_ip: Optional[pulumi.Input[str]] = None,
host_port: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None):
"""
ContainerPort represents a network port in a single container.
:param pulumi.Input[int] container_port: Number of the port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.
:param pulumi.Input[str] protocol: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP".
:param pulumi.Input[str] host_ip: What host IP to bind the external port to.
:param pulumi.Input[int] host_port: Number of the port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.
:param pulumi.Input[str] name: Name for the port, which can be referred to by services. If specified, this must be an IANA_SVC_NAME and unique within the pod.
"""
pulumi.set(__self__, "container_port", container_port)
pulumi.set(__self__, "protocol", protocol)
if host_ip is not None:
pulumi.set(__self__, "host_ip", host_ip)
if host_port is not None:
pulumi.set(__self__, "host_port", host_port)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="containerPort")
def container_port(self) -> pulumi.Input[int]:
"""
Number of the port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.
"""
return pulumi.get(self, "container_port")
@container_port.setter
def container_port(self, value: pulumi.Input[int]):
pulumi.set(self, "container_port", value)
@property
@pulumi.getter
def protocol(self) -> pulumi.Input[str]:
"""
Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP".
"""
return pulumi.get(self, "protocol")
@protocol.setter
def protocol(self, value: pulumi.Input[str]):
pulumi.set(self, "protocol", value)
@property
@pulumi.getter(name="hostIP")
def host_ip(self) -> Optional[pulumi.Input[str]]:
"""
What host IP to bind the external port to.
"""
return pulumi.get(self, "host_ip")
@host_ip.setter
def host_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host_ip", value)
@property
@pulumi.getter(name="hostPort")
def host_port(self) -> Optional[pulumi.Input[int]]:
"""
Number of the port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.
"""
return pulumi.get(self, "host_port")
@host_port.setter
def host_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "host_port", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name for the port, which can be referred to by services. If specified, this must be an IANA_SVC_NAME and unique within the pod.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersReadinessProbeArgs:
def __init__(__self__, *,
exec_: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersReadinessProbeExecArgs']] = None,
failure_threshold: Optional[pulumi.Input[int]] = None,
http_get: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersReadinessProbeHttpGetArgs']] = None,
initial_delay_seconds: Optional[pulumi.Input[int]] = None,
period_seconds: Optional[pulumi.Input[int]] = None,
success_threshold: Optional[pulumi.Input[int]] = None,
tcp_socket: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersReadinessProbeTcpSocketArgs']] = None,
timeout_seconds: Optional[pulumi.Input[int]] = None):
"""
Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersReadinessProbeExecArgs'] exec_: One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[int] failure_threshold: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersReadinessProbeHttpGetArgs'] http_get: HTTPGet specifies the http request to perform.
:param pulumi.Input[int] initial_delay_seconds: Number of seconds after the container has started before readiness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input[int] period_seconds: How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
:param pulumi.Input[int] success_threshold: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersReadinessProbeTcpSocketArgs'] tcp_socket: TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
:param pulumi.Input[int] timeout_seconds: Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
if exec_ is not None:
pulumi.set(__self__, "exec_", exec_)
if failure_threshold is not None:
pulumi.set(__self__, "failure_threshold", failure_threshold)
if http_get is not None:
pulumi.set(__self__, "http_get", http_get)
if initial_delay_seconds is not None:
pulumi.set(__self__, "initial_delay_seconds", initial_delay_seconds)
if period_seconds is not None:
pulumi.set(__self__, "period_seconds", period_seconds)
if success_threshold is not None:
pulumi.set(__self__, "success_threshold", success_threshold)
if tcp_socket is not None:
pulumi.set(__self__, "tcp_socket", tcp_socket)
if timeout_seconds is not None:
pulumi.set(__self__, "timeout_seconds", timeout_seconds)
@property
@pulumi.getter(name="exec")
def exec_(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersReadinessProbeExecArgs']]:
"""
One and only one of the following should be specified. Exec specifies the action to take.
"""
return pulumi.get(self, "exec_")
@exec_.setter
def exec_(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersReadinessProbeExecArgs']]):
pulumi.set(self, "exec_", value)
@property
@pulumi.getter(name="failureThreshold")
def failure_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
"""
return pulumi.get(self, "failure_threshold")
@failure_threshold.setter
def failure_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "failure_threshold", value)
@property
@pulumi.getter(name="httpGet")
def http_get(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersReadinessProbeHttpGetArgs']]:
"""
HTTPGet specifies the http request to perform.
"""
return pulumi.get(self, "http_get")
@http_get.setter
def http_get(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersReadinessProbeHttpGetArgs']]):
pulumi.set(self, "http_get", value)
@property
@pulumi.getter(name="initialDelaySeconds")
def initial_delay_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Number of seconds after the container has started before readiness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "initial_delay_seconds")
@initial_delay_seconds.setter
def initial_delay_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "initial_delay_seconds", value)
@property
@pulumi.getter(name="periodSeconds")
def period_seconds(self) -> Optional[pulumi.Input[int]]:
"""
How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
"""
return pulumi.get(self, "period_seconds")
@period_seconds.setter
def period_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "period_seconds", value)
@property
@pulumi.getter(name="successThreshold")
def success_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
"""
return pulumi.get(self, "success_threshold")
@success_threshold.setter
def success_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "success_threshold", value)
@property
@pulumi.getter(name="tcpSocket")
def tcp_socket(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersReadinessProbeTcpSocketArgs']]:
"""
TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
"""
return pulumi.get(self, "tcp_socket")
@tcp_socket.setter
def tcp_socket(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersReadinessProbeTcpSocketArgs']]):
pulumi.set(self, "tcp_socket", value)
@property
@pulumi.getter(name="timeoutSeconds")
def timeout_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "timeout_seconds")
@timeout_seconds.setter
def timeout_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "timeout_seconds", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersReadinessProbeExecArgs:
def __init__(__self__, *,
command: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
if command is not None:
pulumi.set(__self__, "command", command)
@property
@pulumi.getter
def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "command", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersReadinessProbeHttpGetArgs:
def __init__(__self__, *,
port: pulumi.Input[Union[int, str]],
host: Optional[pulumi.Input[str]] = None,
http_headers: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersReadinessProbeHttpGetHttpHeadersArgs']]]] = None,
path: Optional[pulumi.Input[str]] = None,
scheme: Optional[pulumi.Input[str]] = None):
"""
HTTPGet specifies the http request to perform.
:param pulumi.Input[Union[int, str]] port: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersReadinessProbeHttpGetHttpHeadersArgs']]] http_headers: Custom headers to set in the request. HTTP allows repeated headers.
:param pulumi.Input[str] path: Path to access on the HTTP server.
:param pulumi.Input[str] scheme: Scheme to use for connecting to the host. Defaults to HTTP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
if http_headers is not None:
pulumi.set(__self__, "http_headers", http_headers)
if path is not None:
pulumi.set(__self__, "path", path)
if scheme is not None:
pulumi.set(__self__, "scheme", scheme)
@property
@pulumi.getter
def port(self) -> pulumi.Input[Union[int, str]]:
"""
Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input[Union[int, str]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@property
@pulumi.getter(name="httpHeaders")
def http_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersReadinessProbeHttpGetHttpHeadersArgs']]]]:
"""
Custom headers to set in the request. HTTP allows repeated headers.
"""
return pulumi.get(self, "http_headers")
@http_headers.setter
def http_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersReadinessProbeHttpGetHttpHeadersArgs']]]]):
pulumi.set(self, "http_headers", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
Path to access on the HTTP server.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def scheme(self) -> Optional[pulumi.Input[str]]:
"""
Scheme to use for connecting to the host. Defaults to HTTP.
"""
return pulumi.get(self, "scheme")
@scheme.setter
def scheme(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scheme", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersReadinessProbeHttpGetHttpHeadersArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
"""
HTTPHeader describes a custom header to be used in HTTP probes.
:param pulumi.Input[str] name: The header field name
:param pulumi.Input[str] value: The header field value
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The header field name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The header field value
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersReadinessProbeTcpSocketArgs:
def __init__(__self__, *,
port: pulumi.Input[Union[int, str]],
host: Optional[pulumi.Input[str]] = None):
"""
TCPSocket specifies an action involving a TCP port. TCP hooks are not yet supported (upstream TODO: implement a realistic TCP lifecycle hook).
:param pulumi.Input[Union[int, str]] port: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Optional: Host name to connect to, defaults to the pod IP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
@property
@pulumi.getter
def port(self) -> pulumi.Input[Union[int, str]]:
"""
Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input[Union[int, str]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Optional: Host name to connect to, defaults to the pod IP.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersResourcesArgs:
def __init__(__self__, *,
limits: Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersResourcesLimitsArgs']]]] = None,
requests: Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersResourcesRequestsArgs']]]] = None):
"""
Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
:param pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersResourcesLimitsArgs']]] limits: Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
:param pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersResourcesRequestsArgs']]] requests: Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
if limits is not None:
pulumi.set(__self__, "limits", limits)
if requests is not None:
pulumi.set(__self__, "requests", requests)
@property
@pulumi.getter
def limits(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersResourcesLimitsArgs']]]]:
"""
Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
return pulumi.get(self, "limits")
@limits.setter
def limits(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersResourcesLimitsArgs']]]]):
pulumi.set(self, "limits", value)
@property
@pulumi.getter
def requests(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersResourcesRequestsArgs']]]]:
"""
Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
return pulumi.get(self, "requests")
@requests.setter
def requests(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersResourcesRequestsArgs']]]]):
pulumi.set(self, "requests", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersResourcesLimitsArgs:
def __init__(__self__):
pass
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersResourcesRequestsArgs:
def __init__(__self__):
pass
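# --- Note (editor's illustration, not generator output) ---
# The two empty classes above are emitted for CRD schema nodes whose map
# values are open-ended Kubernetes resource Quantities (e.g. "500m" CPU or
# "1Gi" memory), so they carry no fields of their own. A hedged sketch of the
# enclosing resources input, with placeholder value objects:
#
#   resources = SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersResourcesArgs(
#       limits={
#           "cpu": SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersResourcesLimitsArgs(),
#       },
#       requests={
#           "cpu": SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersResourcesRequestsArgs(),
#       },
#   )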
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersSecurityContextArgs:
def __init__(__self__, *,
allow_privilege_escalation: Optional[pulumi.Input[bool]] = None,
capabilities: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersSecurityContextCapabilitiesArgs']] = None,
privileged: Optional[pulumi.Input[bool]] = None,
proc_mount: Optional[pulumi.Input[str]] = None,
read_only_root_filesystem: Optional[pulumi.Input[bool]] = None,
run_as_group: Optional[pulumi.Input[int]] = None,
run_as_non_root: Optional[pulumi.Input[bool]] = None,
run_as_user: Optional[pulumi.Input[int]] = None,
se_linux_options: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersSecurityContextSeLinuxOptionsArgs']] = None,
windows_options: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersSecurityContextWindowsOptionsArgs']] = None):
"""
Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
:param pulumi.Input[bool] allow_privilege_escalation: AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls whether the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is always true when the container 1) is run as Privileged or 2) has CAP_SYS_ADMIN.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersSecurityContextCapabilitiesArgs'] capabilities: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.
:param pulumi.Input[bool] privileged: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.
:param pulumi.Input[str] proc_mount: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled.
:param pulumi.Input[bool] read_only_root_filesystem: Whether this container has a read-only root filesystem. Default is false.
:param pulumi.Input[int] run_as_group: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param pulumi.Input[bool] run_as_non_root: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param pulumi.Input[int] run_as_user: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersSecurityContextSeLinuxOptionsArgs'] se_linux_options: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersSecurityContextWindowsOptionsArgs'] windows_options: The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
if allow_privilege_escalation is not None:
pulumi.set(__self__, "allow_privilege_escalation", allow_privilege_escalation)
if capabilities is not None:
pulumi.set(__self__, "capabilities", capabilities)
if privileged is not None:
pulumi.set(__self__, "privileged", privileged)
if proc_mount is not None:
pulumi.set(__self__, "proc_mount", proc_mount)
if read_only_root_filesystem is not None:
pulumi.set(__self__, "read_only_root_filesystem", read_only_root_filesystem)
if run_as_group is not None:
pulumi.set(__self__, "run_as_group", run_as_group)
if run_as_non_root is not None:
pulumi.set(__self__, "run_as_non_root", run_as_non_root)
if run_as_user is not None:
pulumi.set(__self__, "run_as_user", run_as_user)
if se_linux_options is not None:
pulumi.set(__self__, "se_linux_options", se_linux_options)
if windows_options is not None:
pulumi.set(__self__, "windows_options", windows_options)
@property
@pulumi.getter(name="allowPrivilegeEscalation")
def allow_privilege_escalation(self) -> Optional[pulumi.Input[bool]]:
"""
AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls whether the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is always true when the container 1) is run as Privileged or 2) has CAP_SYS_ADMIN.
"""
return pulumi.get(self, "allow_privilege_escalation")
@allow_privilege_escalation.setter
def allow_privilege_escalation(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "allow_privilege_escalation", value)
@property
@pulumi.getter
def capabilities(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersSecurityContextCapabilitiesArgs']]:
"""
The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.
"""
return pulumi.get(self, "capabilities")
@capabilities.setter
def capabilities(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersSecurityContextCapabilitiesArgs']]):
pulumi.set(self, "capabilities", value)
@property
@pulumi.getter
def privileged(self) -> Optional[pulumi.Input[bool]]:
"""
Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.
"""
return pulumi.get(self, "privileged")
@privileged.setter
def privileged(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "privileged", value)
@property
@pulumi.getter(name="procMount")
def proc_mount(self) -> Optional[pulumi.Input[str]]:
"""
procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled.
"""
return pulumi.get(self, "proc_mount")
@proc_mount.setter
def proc_mount(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "proc_mount", value)
@property
@pulumi.getter(name="readOnlyRootFilesystem")
def read_only_root_filesystem(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this container has a read-only root filesystem. Default is false.
"""
return pulumi.get(self, "read_only_root_filesystem")
@read_only_root_filesystem.setter
def read_only_root_filesystem(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "read_only_root_filesystem", value)
@property
@pulumi.getter(name="runAsGroup")
def run_as_group(self) -> Optional[pulumi.Input[int]]:
"""
The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "run_as_group")
@run_as_group.setter
def run_as_group(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "run_as_group", value)
@property
@pulumi.getter(name="runAsNonRoot")
def run_as_non_root(self) -> Optional[pulumi.Input[bool]]:
"""
Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "run_as_non_root")
@run_as_non_root.setter
def run_as_non_root(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "run_as_non_root", value)
@property
@pulumi.getter(name="runAsUser")
def run_as_user(self) -> Optional[pulumi.Input[int]]:
"""
The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "run_as_user")
@run_as_user.setter
def run_as_user(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "run_as_user", value)
@property
@pulumi.getter(name="seLinuxOptions")
def se_linux_options(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersSecurityContextSeLinuxOptionsArgs']]:
"""
The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "se_linux_options")
@se_linux_options.setter
def se_linux_options(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersSecurityContextSeLinuxOptionsArgs']]):
pulumi.set(self, "se_linux_options", value)
@property
@pulumi.getter(name="windowsOptions")
def windows_options(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersSecurityContextWindowsOptionsArgs']]:
"""
The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "windows_options")
@windows_options.setter
def windows_options(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersSecurityContextWindowsOptionsArgs']]):
pulumi.set(self, "windows_options", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersSecurityContextCapabilitiesArgs:
def __init__(__self__, *,
add: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
drop: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.
:param pulumi.Input[Sequence[pulumi.Input[str]]] add: Added capabilities
:param pulumi.Input[Sequence[pulumi.Input[str]]] drop: Removed capabilities
"""
if add is not None:
pulumi.set(__self__, "add", add)
if drop is not None:
pulumi.set(__self__, "drop", drop)
@property
@pulumi.getter
def add(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Added capabilities
"""
return pulumi.get(self, "add")
@add.setter
def add(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "add", value)
@property
@pulumi.getter
def drop(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Removed capabilities
"""
return pulumi.get(self, "drop")
@drop.setter
def drop(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "drop", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersSecurityContextSeLinuxOptionsArgs:
def __init__(__self__, *,
level: Optional[pulumi.Input[str]] = None,
role: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
user: Optional[pulumi.Input[str]] = None):
"""
The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param pulumi.Input[str] level: Level is the SELinux level label that applies to the container.
:param pulumi.Input[str] role: Role is a SELinux role label that applies to the container.
:param pulumi.Input[str] type: Type is a SELinux type label that applies to the container.
:param pulumi.Input[str] user: User is a SELinux user label that applies to the container.
"""
if level is not None:
pulumi.set(__self__, "level", level)
if role is not None:
pulumi.set(__self__, "role", role)
if type is not None:
pulumi.set(__self__, "type", type)
if user is not None:
pulumi.set(__self__, "user", user)
@property
@pulumi.getter
def level(self) -> Optional[pulumi.Input[str]]:
"""
Level is the SELinux level label that applies to the container.
"""
return pulumi.get(self, "level")
@level.setter
def level(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "level", value)
@property
@pulumi.getter
def role(self) -> Optional[pulumi.Input[str]]:
"""
Role is a SELinux role label that applies to the container.
"""
return pulumi.get(self, "role")
@role.setter
def role(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
Type is a SELinux type label that applies to the container.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def user(self) -> Optional[pulumi.Input[str]]:
"""
User is a SELinux user label that applies to the container.
"""
return pulumi.get(self, "user")
@user.setter
def user(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersSecurityContextWindowsOptionsArgs:
def __init__(__self__, *,
gmsa_credential_spec: Optional[pulumi.Input[str]] = None,
gmsa_credential_spec_name: Optional[pulumi.Input[str]] = None,
run_as_user_name: Optional[pulumi.Input[str]] = None):
"""
The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param pulumi.Input[str] gmsa_credential_spec: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.
:param pulumi.Input[str] gmsa_credential_spec_name: GMSACredentialSpecName is the name of the GMSA credential spec to use.
:param pulumi.Input[str] run_as_user_name: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
if gmsa_credential_spec is not None:
pulumi.set(__self__, "gmsa_credential_spec", gmsa_credential_spec)
if gmsa_credential_spec_name is not None:
pulumi.set(__self__, "gmsa_credential_spec_name", gmsa_credential_spec_name)
if run_as_user_name is not None:
pulumi.set(__self__, "run_as_user_name", run_as_user_name)
@property
@pulumi.getter(name="gmsaCredentialSpec")
def gmsa_credential_spec(self) -> Optional[pulumi.Input[str]]:
"""
GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.
"""
return pulumi.get(self, "gmsa_credential_spec")
@gmsa_credential_spec.setter
def gmsa_credential_spec(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "gmsa_credential_spec", value)
@property
@pulumi.getter(name="gmsaCredentialSpecName")
def gmsa_credential_spec_name(self) -> Optional[pulumi.Input[str]]:
"""
GMSACredentialSpecName is the name of the GMSA credential spec to use.
"""
return pulumi.get(self, "gmsa_credential_spec_name")
@gmsa_credential_spec_name.setter
def gmsa_credential_spec_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "gmsa_credential_spec_name", value)
@property
@pulumi.getter(name="runAsUserName")
def run_as_user_name(self) -> Optional[pulumi.Input[str]]:
"""
The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "run_as_user_name")
@run_as_user_name.setter
def run_as_user_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "run_as_user_name", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeArgs:
def __init__(__self__, *,
exec_: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeExecArgs']] = None,
failure_threshold: Optional[pulumi.Input[int]] = None,
http_get: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeHttpGetArgs']] = None,
initial_delay_seconds: Optional[pulumi.Input[int]] = None,
period_seconds: Optional[pulumi.Input[int]] = None,
success_threshold: Optional[pulumi.Input[int]] = None,
tcp_socket: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeTcpSocketArgs']] = None,
timeout_seconds: Optional[pulumi.Input[int]] = None):
"""
StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. This is a beta feature enabled by the StartupProbe feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeExecArgs'] exec_: One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[int] failure_threshold: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeHttpGetArgs'] http_get: HTTPGet specifies the http request to perform.
:param pulumi.Input[int] initial_delay_seconds: Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input[int] period_seconds: How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
:param pulumi.Input[int] success_threshold: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeTcpSocketArgs'] tcp_socket: TCPSocket specifies an action involving a TCP port. TCP hooks are not yet supported (upstream TODO: implement a realistic TCP lifecycle hook).
:param pulumi.Input[int] timeout_seconds: Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
if exec_ is not None:
pulumi.set(__self__, "exec_", exec_)
if failure_threshold is not None:
pulumi.set(__self__, "failure_threshold", failure_threshold)
if http_get is not None:
pulumi.set(__self__, "http_get", http_get)
if initial_delay_seconds is not None:
pulumi.set(__self__, "initial_delay_seconds", initial_delay_seconds)
if period_seconds is not None:
pulumi.set(__self__, "period_seconds", period_seconds)
if success_threshold is not None:
pulumi.set(__self__, "success_threshold", success_threshold)
if tcp_socket is not None:
pulumi.set(__self__, "tcp_socket", tcp_socket)
if timeout_seconds is not None:
pulumi.set(__self__, "timeout_seconds", timeout_seconds)
@property
@pulumi.getter(name="exec")
def exec_(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeExecArgs']]:
"""
One and only one of the following should be specified. Exec specifies the action to take.
"""
return pulumi.get(self, "exec_")
@exec_.setter
def exec_(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeExecArgs']]):
pulumi.set(self, "exec_", value)
@property
@pulumi.getter(name="failureThreshold")
def failure_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
"""
return pulumi.get(self, "failure_threshold")
@failure_threshold.setter
def failure_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "failure_threshold", value)
@property
@pulumi.getter(name="httpGet")
def http_get(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeHttpGetArgs']]:
"""
HTTPGet specifies the http request to perform.
"""
return pulumi.get(self, "http_get")
@http_get.setter
def http_get(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeHttpGetArgs']]):
pulumi.set(self, "http_get", value)
@property
@pulumi.getter(name="initialDelaySeconds")
def initial_delay_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "initial_delay_seconds")
@initial_delay_seconds.setter
def initial_delay_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "initial_delay_seconds", value)
@property
@pulumi.getter(name="periodSeconds")
def period_seconds(self) -> Optional[pulumi.Input[int]]:
"""
How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
"""
return pulumi.get(self, "period_seconds")
@period_seconds.setter
def period_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "period_seconds", value)
@property
@pulumi.getter(name="successThreshold")
def success_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
"""
return pulumi.get(self, "success_threshold")
@success_threshold.setter
def success_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "success_threshold", value)
@property
@pulumi.getter(name="tcpSocket")
def tcp_socket(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeTcpSocketArgs']]:
"""
TCPSocket specifies an action involving a TCP port. TCP hooks are not yet supported (upstream TODO: implement a realistic TCP lifecycle hook).
"""
return pulumi.get(self, "tcp_socket")
@tcp_socket.setter
def tcp_socket(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeTcpSocketArgs']]):
pulumi.set(self, "tcp_socket", value)
@property
@pulumi.getter(name="timeoutSeconds")
def timeout_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "timeout_seconds")
@timeout_seconds.setter
def timeout_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "timeout_seconds", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeExecArgs:
def __init__(__self__, *,
command: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
if command is not None:
pulumi.set(__self__, "command", command)
@property
@pulumi.getter
def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "command", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeHttpGetArgs:
def __init__(__self__, *,
port: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeHttpGetPortArgs'],
host: Optional[pulumi.Input[str]] = None,
http_headers: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeHttpGetHttpHeadersArgs']]]] = None,
path: Optional[pulumi.Input[str]] = None,
scheme: Optional[pulumi.Input[str]] = None):
"""
HTTPGet specifies the http request to perform.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeHttpGetPortArgs'] port: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeHttpGetHttpHeadersArgs']]] http_headers: Custom headers to set in the request. HTTP allows repeated headers.
:param pulumi.Input[str] path: Path to access on the HTTP server.
:param pulumi.Input[str] scheme: Scheme to use for connecting to the host. Defaults to HTTP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
if http_headers is not None:
pulumi.set(__self__, "http_headers", http_headers)
if path is not None:
pulumi.set(__self__, "path", path)
if scheme is not None:
pulumi.set(__self__, "scheme", scheme)
@property
@pulumi.getter
def port(self) -> pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeHttpGetPortArgs']:
"""
Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeHttpGetPortArgs']):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@property
@pulumi.getter(name="httpHeaders")
def http_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeHttpGetHttpHeadersArgs']]]]:
"""
Custom headers to set in the request. HTTP allows repeated headers.
"""
return pulumi.get(self, "http_headers")
@http_headers.setter
def http_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeHttpGetHttpHeadersArgs']]]]):
pulumi.set(self, "http_headers", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
Path to access on the HTTP server.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def scheme(self) -> Optional[pulumi.Input[str]]:
"""
Scheme to use for connecting to the host. Defaults to HTTP.
"""
return pulumi.get(self, "scheme")
@scheme.setter
def scheme(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scheme", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeHttpGetHttpHeadersArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
"""
HTTPHeader describes a custom header to be used in HTTP probes.
:param pulumi.Input[str] name: The header field name
:param pulumi.Input[str] value: The header field value
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The header field name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The header field value
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeHttpGetPortArgs:
def __init__(__self__):
pass
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeTcpSocketArgs:
def __init__(__self__, *,
port: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeTcpSocketPortArgs'],
host: Optional[pulumi.Input[str]] = None):
"""
TCPSocket specifies an action involving a TCP port. TCP hooks are not yet supported (upstream TODO: implement a realistic TCP lifecycle hook).
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeTcpSocketPortArgs'] port: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Optional: Host name to connect to, defaults to the pod IP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
@property
@pulumi.getter
def port(self) -> pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeTcpSocketPortArgs']:
"""
Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeTcpSocketPortArgs']):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Optional: Host name to connect to, defaults to the pod IP.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersStartupProbeTcpSocketPortArgs:
def __init__(__self__):
pass
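# --- Note (editor's illustration, not generator output) ---
# The empty StartupProbeHttpGetPortArgs and StartupProbeTcpSocketPortArgs
# classes above stand in for the CRD's int-or-string port node (a number in
# the range 1-65535 or an IANA_SVC_NAME), so the generator emits them without
# fields.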
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersVolumeDevicesArgs:
def __init__(__self__, *,
device_path: pulumi.Input[str],
name: pulumi.Input[str]):
"""
volumeDevice describes a mapping of a raw block device within a container.
:param pulumi.Input[str] device_path: devicePath is the path inside of the container that the device will be mapped to.
:param pulumi.Input[str] name: name must match the name of a persistentVolumeClaim in the pod
"""
pulumi.set(__self__, "device_path", device_path)
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="devicePath")
def device_path(self) -> pulumi.Input[str]:
"""
devicePath is the path inside of the container that the device will be mapped to.
"""
return pulumi.get(self, "device_path")
@device_path.setter
def device_path(self, value: pulumi.Input[str]):
pulumi.set(self, "device_path", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
name must match the name of a persistentVolumeClaim in the pod
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersVolumeMountsArgs:
def __init__(__self__, *,
mount_path: pulumi.Input[str],
name: pulumi.Input[str],
mount_propagation: Optional[pulumi.Input[str]] = None,
read_only: Optional[pulumi.Input[bool]] = None,
sub_path: Optional[pulumi.Input[str]] = None,
sub_path_expr: Optional[pulumi.Input[str]] = None):
"""
VolumeMount describes a mounting of a Volume within a container.
:param pulumi.Input[str] mount_path: Path within the container at which the volume should be mounted. Must not contain ':'.
:param pulumi.Input[str] name: This must match the Name of a Volume.
:param pulumi.Input[str] mount_propagation: mountPropagation determines how mounts are propagated from the host to the container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
:param pulumi.Input[bool] read_only: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
:param pulumi.Input[str] sub_path: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
:param pulumi.Input[str] sub_path_expr: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive.
"""
pulumi.set(__self__, "mount_path", mount_path)
pulumi.set(__self__, "name", name)
if mount_propagation is not None:
pulumi.set(__self__, "mount_propagation", mount_propagation)
if read_only is not None:
pulumi.set(__self__, "read_only", read_only)
if sub_path is not None:
pulumi.set(__self__, "sub_path", sub_path)
if sub_path_expr is not None:
pulumi.set(__self__, "sub_path_expr", sub_path_expr)
@property
@pulumi.getter(name="mountPath")
def mount_path(self) -> pulumi.Input[str]:
"""
Path within the container at which the volume should be mounted. Must not contain ':'.
"""
return pulumi.get(self, "mount_path")
@mount_path.setter
def mount_path(self, value: pulumi.Input[str]):
pulumi.set(self, "mount_path", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
This must match the Name of a Volume.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="mountPropagation")
def mount_propagation(self) -> Optional[pulumi.Input[str]]:
"""
mountPropagation determines how mounts are propagated from the host to the container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
"""
return pulumi.get(self, "mount_propagation")
@mount_propagation.setter
def mount_propagation(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mount_propagation", value)
@property
@pulumi.getter(name="readOnly")
def read_only(self) -> Optional[pulumi.Input[bool]]:
"""
Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
"""
return pulumi.get(self, "read_only")
@read_only.setter
def read_only(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "read_only", value)
@property
@pulumi.getter(name="subPath")
def sub_path(self) -> Optional[pulumi.Input[str]]:
"""
Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
"""
return pulumi.get(self, "sub_path")
@sub_path.setter
def sub_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sub_path", value)
@property
@pulumi.getter(name="subPathExpr")
def sub_path_expr(self) -> Optional[pulumi.Input[str]]:
"""
Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive.
"""
return pulumi.get(self, "sub_path_expr")
@sub_path_expr.setter
def sub_path_expr(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sub_path_expr", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecDnsConfigArgs:
def __init__(__self__, *,
nameservers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
options: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecDnsConfigOptionsArgs']]]] = None,
searches: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.
:param pulumi.Input[Sequence[pulumi.Input[str]]] nameservers: A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecDnsConfigOptionsArgs']]] options: A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy.
:param pulumi.Input[Sequence[pulumi.Input[str]]] searches: A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed.
"""
if nameservers is not None:
pulumi.set(__self__, "nameservers", nameservers)
if options is not None:
pulumi.set(__self__, "options", options)
if searches is not None:
pulumi.set(__self__, "searches", searches)
@property
@pulumi.getter
def nameservers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed.
"""
return pulumi.get(self, "nameservers")
@nameservers.setter
def nameservers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "nameservers", value)
@property
@pulumi.getter
def options(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecDnsConfigOptionsArgs']]]]:
"""
A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy.
"""
return pulumi.get(self, "options")
@options.setter
def options(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecDnsConfigOptionsArgs']]]]):
pulumi.set(self, "options", value)
@property
@pulumi.getter
def searches(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed.
"""
return pulumi.get(self, "searches")
@searches.setter
def searches(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "searches", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecDnsConfigOptionsArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None):
"""
PodDNSConfigOption defines DNS resolver options of a pod.
:param pulumi.Input[str] name: Required.
:param pulumi.Input[str] value: Optional value of the DNS resolver option.
"""
if name is not None:
pulumi.set(__self__, "name", name)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Required.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
args: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
command: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
env: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvArgs']]]] = None,
env_from: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvFromArgs']]]] = None,
image: Optional[pulumi.Input[str]] = None,
image_pull_policy: Optional[pulumi.Input[str]] = None,
lifecycle: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecycleArgs']] = None,
liveness_probe: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeArgs']] = None,
ports: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersPortsArgs']]]] = None,
readiness_probe: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeArgs']] = None,
resources: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersResourcesArgs']] = None,
security_context: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersSecurityContextArgs']] = None,
startup_probe: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeArgs']] = None,
stdin: Optional[pulumi.Input[bool]] = None,
stdin_once: Optional[pulumi.Input[bool]] = None,
target_container_name: Optional[pulumi.Input[str]] = None,
termination_message_path: Optional[pulumi.Input[str]] = None,
termination_message_policy: Optional[pulumi.Input[str]] = None,
tty: Optional[pulumi.Input[bool]] = None,
volume_devices: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersVolumeDevicesArgs']]]] = None,
volume_mounts: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersVolumeMountsArgs']]]] = None,
working_dir: Optional[pulumi.Input[str]] = None):
"""
An EphemeralContainer is a container that may be added temporarily to an existing pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a pod is removed or restarted. If an ephemeral container causes a pod to exceed its resource allocation, the pod may be evicted. Ephemeral containers may not be added by directly updating the pod spec. They must be added via the pod's ephemeralcontainers subresource, and they will appear in the pod spec once added. This is an alpha feature enabled by the EphemeralContainers feature flag.
:param pulumi.Input[str] name: Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers.
:param pulumi.Input[Sequence[pulumi.Input[str]]] args: Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvArgs']]] env: List of environment variables to set in the container. Cannot be updated.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvFromArgs']]] env_from: List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.
:param pulumi.Input[str] image: Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images
:param pulumi.Input[str] image_pull_policy: Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecycleArgs'] lifecycle: Lifecycle is not allowed for ephemeral containers.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeArgs'] liveness_probe: Probes are not allowed for ephemeral containers.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersPortsArgs']]] ports: Ports are not allowed for ephemeral containers.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeArgs'] readiness_probe: Probes are not allowed for ephemeral containers.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersResourcesArgs'] resources: Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersSecurityContextArgs'] security_context: SecurityContext is not allowed for ephemeral containers.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeArgs'] startup_probe: Probes are not allowed for ephemeral containers.
:param pulumi.Input[bool] stdin: Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.
:param pulumi.Input[bool] stdin_once: Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false.
:param pulumi.Input[str] target_container_name: If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container is run in whatever namespaces are shared for the pod. Note that the container runtime must support this feature.
:param pulumi.Input[str] termination_message_path: Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.
:param pulumi.Input[str] termination_message_policy: Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.
:param pulumi.Input[bool] tty: Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersVolumeDevicesArgs']]] volume_devices: volumeDevices is the list of block devices to be used by the container.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersVolumeMountsArgs']]] volume_mounts: Pod volumes to mount into the container's filesystem. Cannot be updated.
:param pulumi.Input[str] working_dir: Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.
"""
pulumi.set(__self__, "name", name)
if args is not None:
pulumi.set(__self__, "args", args)
if command is not None:
pulumi.set(__self__, "command", command)
if env is not None:
pulumi.set(__self__, "env", env)
if env_from is not None:
pulumi.set(__self__, "env_from", env_from)
if image is not None:
pulumi.set(__self__, "image", image)
if image_pull_policy is not None:
pulumi.set(__self__, "image_pull_policy", image_pull_policy)
if lifecycle is not None:
pulumi.set(__self__, "lifecycle", lifecycle)
if liveness_probe is not None:
pulumi.set(__self__, "liveness_probe", liveness_probe)
if ports is not None:
pulumi.set(__self__, "ports", ports)
if readiness_probe is not None:
pulumi.set(__self__, "readiness_probe", readiness_probe)
if resources is not None:
pulumi.set(__self__, "resources", resources)
if security_context is not None:
pulumi.set(__self__, "security_context", security_context)
if startup_probe is not None:
pulumi.set(__self__, "startup_probe", startup_probe)
if stdin is not None:
pulumi.set(__self__, "stdin", stdin)
if stdin_once is not None:
pulumi.set(__self__, "stdin_once", stdin_once)
if target_container_name is not None:
pulumi.set(__self__, "target_container_name", target_container_name)
if termination_message_path is not None:
pulumi.set(__self__, "termination_message_path", termination_message_path)
if termination_message_policy is not None:
pulumi.set(__self__, "termination_message_policy", termination_message_policy)
if tty is not None:
pulumi.set(__self__, "tty", tty)
if volume_devices is not None:
pulumi.set(__self__, "volume_devices", volume_devices)
if volume_mounts is not None:
pulumi.set(__self__, "volume_mounts", volume_mounts)
if working_dir is not None:
pulumi.set(__self__, "working_dir", working_dir)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def args(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
        Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
"""
return pulumi.get(self, "args")
@args.setter
def args(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "args", value)
@property
@pulumi.getter
def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
        Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "command", value)
@property
@pulumi.getter
def env(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvArgs']]]]:
"""
List of environment variables to set in the container. Cannot be updated.
"""
return pulumi.get(self, "env")
@env.setter
def env(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvArgs']]]]):
pulumi.set(self, "env", value)
@property
@pulumi.getter(name="envFrom")
def env_from(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvFromArgs']]]]:
"""
        List of sources to populate environment variables in the container. Each key defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.
"""
return pulumi.get(self, "env_from")
@env_from.setter
def env_from(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvFromArgs']]]]):
pulumi.set(self, "env_from", value)
@property
@pulumi.getter
def image(self) -> Optional[pulumi.Input[str]]:
"""
Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images
"""
return pulumi.get(self, "image")
@image.setter
def image(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image", value)
@property
@pulumi.getter(name="imagePullPolicy")
def image_pull_policy(self) -> Optional[pulumi.Input[str]]:
"""
Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
"""
return pulumi.get(self, "image_pull_policy")
@image_pull_policy.setter
def image_pull_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image_pull_policy", value)
@property
@pulumi.getter
def lifecycle(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecycleArgs']]:
"""
Lifecycle is not allowed for ephemeral containers.
"""
return pulumi.get(self, "lifecycle")
@lifecycle.setter
def lifecycle(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecycleArgs']]):
pulumi.set(self, "lifecycle", value)
@property
@pulumi.getter(name="livenessProbe")
def liveness_probe(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeArgs']]:
"""
Probes are not allowed for ephemeral containers.
"""
return pulumi.get(self, "liveness_probe")
@liveness_probe.setter
def liveness_probe(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeArgs']]):
pulumi.set(self, "liveness_probe", value)
@property
@pulumi.getter
def ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersPortsArgs']]]]:
"""
Ports are not allowed for ephemeral containers.
"""
return pulumi.get(self, "ports")
@ports.setter
def ports(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersPortsArgs']]]]):
pulumi.set(self, "ports", value)
@property
@pulumi.getter(name="readinessProbe")
def readiness_probe(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeArgs']]:
"""
Probes are not allowed for ephemeral containers.
"""
return pulumi.get(self, "readiness_probe")
@readiness_probe.setter
def readiness_probe(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeArgs']]):
pulumi.set(self, "readiness_probe", value)
@property
@pulumi.getter
def resources(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersResourcesArgs']]:
"""
Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.
"""
return pulumi.get(self, "resources")
@resources.setter
def resources(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersResourcesArgs']]):
pulumi.set(self, "resources", value)
@property
@pulumi.getter(name="securityContext")
def security_context(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersSecurityContextArgs']]:
"""
SecurityContext is not allowed for ephemeral containers.
"""
return pulumi.get(self, "security_context")
@security_context.setter
def security_context(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersSecurityContextArgs']]):
pulumi.set(self, "security_context", value)
@property
@pulumi.getter(name="startupProbe")
def startup_probe(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeArgs']]:
"""
Probes are not allowed for ephemeral containers.
"""
return pulumi.get(self, "startup_probe")
@startup_probe.setter
def startup_probe(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeArgs']]):
pulumi.set(self, "startup_probe", value)
@property
@pulumi.getter
def stdin(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.
"""
return pulumi.get(self, "stdin")
@stdin.setter
def stdin(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "stdin", value)
@property
@pulumi.getter(name="stdinOnce")
def stdin_once(self) -> Optional[pulumi.Input[bool]]:
"""
        Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true, the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false.
"""
return pulumi.get(self, "stdin_once")
@stdin_once.setter
def stdin_once(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "stdin_once", value)
@property
@pulumi.getter(name="targetContainerName")
def target_container_name(self) -> Optional[pulumi.Input[str]]:
"""
If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container is run in whatever namespaces are shared for the pod. Note that the container runtime must support this feature.
"""
return pulumi.get(self, "target_container_name")
@target_container_name.setter
def target_container_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "target_container_name", value)
@property
@pulumi.getter(name="terminationMessagePath")
def termination_message_path(self) -> Optional[pulumi.Input[str]]:
"""
        Optional: Path at which the file containing the container's termination message is mounted into the container's filesystem. The message written there is intended to be a brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.
"""
return pulumi.get(self, "termination_message_path")
@termination_message_path.setter
def termination_message_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "termination_message_path", value)
@property
@pulumi.getter(name="terminationMessagePolicy")
def termination_message_policy(self) -> Optional[pulumi.Input[str]]:
"""
Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.
"""
return pulumi.get(self, "termination_message_policy")
@termination_message_policy.setter
def termination_message_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "termination_message_policy", value)
@property
@pulumi.getter
def tty(self) -> Optional[pulumi.Input[bool]]:
"""
        Whether this container should allocate a TTY for itself; this also requires 'stdin' to be true. Default is false.
"""
return pulumi.get(self, "tty")
@tty.setter
def tty(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "tty", value)
@property
@pulumi.getter(name="volumeDevices")
def volume_devices(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersVolumeDevicesArgs']]]]:
"""
volumeDevices is the list of block devices to be used by the container.
"""
return pulumi.get(self, "volume_devices")
@volume_devices.setter
def volume_devices(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersVolumeDevicesArgs']]]]):
pulumi.set(self, "volume_devices", value)
@property
@pulumi.getter(name="volumeMounts")
def volume_mounts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersVolumeMountsArgs']]]]:
"""
Pod volumes to mount into the container's filesystem. Cannot be updated.
"""
return pulumi.get(self, "volume_mounts")
@volume_mounts.setter
def volume_mounts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersVolumeMountsArgs']]]]):
pulumi.set(self, "volume_mounts", value)
@property
@pulumi.getter(name="workingDir")
def working_dir(self) -> Optional[pulumi.Input[str]]:
"""
Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.
"""
return pulumi.get(self, "working_dir")
@working_dir.setter
def working_dir(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "working_dir", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: Optional[pulumi.Input[str]] = None,
value_from: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvValueFromArgs']] = None):
"""
EnvVar represents an environment variable present in a Container.
:param pulumi.Input[str] name: Name of the environment variable. Must be a C_IDENTIFIER.
        :param pulumi.Input[str] value: Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvValueFromArgs'] value_from: Source for the environment variable's value. Cannot be used if value is not empty.
"""
pulumi.set(__self__, "name", name)
if value is not None:
pulumi.set(__self__, "value", value)
if value_from is not None:
pulumi.set(__self__, "value_from", value_from)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Name of the environment variable. Must be a C_IDENTIFIER.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
        Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
@property
@pulumi.getter(name="valueFrom")
def value_from(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvValueFromArgs']]:
"""
Source for the environment variable's value. Cannot be used if value is not empty.
"""
return pulumi.get(self, "value_from")
@value_from.setter
def value_from(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvValueFromArgs']]):
pulumi.set(self, "value_from", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvFromArgs:
def __init__(__self__, *,
config_map_ref: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvFromConfigMapRefArgs']] = None,
prefix: Optional[pulumi.Input[str]] = None,
secret_ref: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvFromSecretRefArgs']] = None):
"""
EnvFromSource represents the source of a set of ConfigMaps
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvFromConfigMapRefArgs'] config_map_ref: The ConfigMap to select from
:param pulumi.Input[str] prefix: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvFromSecretRefArgs'] secret_ref: The Secret to select from
"""
if config_map_ref is not None:
pulumi.set(__self__, "config_map_ref", config_map_ref)
if prefix is not None:
pulumi.set(__self__, "prefix", prefix)
if secret_ref is not None:
pulumi.set(__self__, "secret_ref", secret_ref)
@property
@pulumi.getter(name="configMapRef")
def config_map_ref(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvFromConfigMapRefArgs']]:
"""
The ConfigMap to select from
"""
return pulumi.get(self, "config_map_ref")
@config_map_ref.setter
def config_map_ref(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvFromConfigMapRefArgs']]):
pulumi.set(self, "config_map_ref", value)
@property
@pulumi.getter
def prefix(self) -> Optional[pulumi.Input[str]]:
"""
An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
"""
return pulumi.get(self, "prefix")
@prefix.setter
def prefix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "prefix", value)
@property
@pulumi.getter(name="secretRef")
def secret_ref(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvFromSecretRefArgs']]:
"""
The Secret to select from
"""
return pulumi.get(self, "secret_ref")
@secret_ref.setter
def secret_ref(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvFromSecretRefArgs']]):
pulumi.set(self, "secret_ref", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvFromConfigMapRefArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
optional: Optional[pulumi.Input[bool]] = None):
"""
The ConfigMap to select from
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
:param pulumi.Input[bool] optional: Specify whether the ConfigMap must be defined
"""
if name is not None:
pulumi.set(__self__, "name", name)
if optional is not None:
pulumi.set(__self__, "optional", optional)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def optional(self) -> Optional[pulumi.Input[bool]]:
"""
Specify whether the ConfigMap must be defined
"""
return pulumi.get(self, "optional")
@optional.setter
def optional(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "optional", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvFromSecretRefArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
optional: Optional[pulumi.Input[bool]] = None):
"""
The Secret to select from
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
:param pulumi.Input[bool] optional: Specify whether the Secret must be defined
"""
if name is not None:
pulumi.set(__self__, "name", name)
if optional is not None:
pulumi.set(__self__, "optional", optional)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def optional(self) -> Optional[pulumi.Input[bool]]:
"""
Specify whether the Secret must be defined
"""
return pulumi.get(self, "optional")
@optional.setter
def optional(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "optional", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvValueFromArgs:
def __init__(__self__, *,
config_map_key_ref: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvValueFromConfigMapKeyRefArgs']] = None,
field_ref: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvValueFromFieldRefArgs']] = None,
resource_field_ref: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvValueFromResourceFieldRefArgs']] = None,
secret_key_ref: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvValueFromSecretKeyRefArgs']] = None):
"""
Source for the environment variable's value. Cannot be used if value is not empty.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvValueFromConfigMapKeyRefArgs'] config_map_key_ref: Selects a key of a ConfigMap.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvValueFromFieldRefArgs'] field_ref: Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
        :param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvValueFromResourceFieldRefArgs'] resource_field_ref: Selects a resource of the container: only resource limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvValueFromSecretKeyRefArgs'] secret_key_ref: Selects a key of a secret in the pod's namespace
"""
if config_map_key_ref is not None:
pulumi.set(__self__, "config_map_key_ref", config_map_key_ref)
if field_ref is not None:
pulumi.set(__self__, "field_ref", field_ref)
if resource_field_ref is not None:
pulumi.set(__self__, "resource_field_ref", resource_field_ref)
if secret_key_ref is not None:
pulumi.set(__self__, "secret_key_ref", secret_key_ref)
@property
@pulumi.getter(name="configMapKeyRef")
def config_map_key_ref(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvValueFromConfigMapKeyRefArgs']]:
"""
Selects a key of a ConfigMap.
"""
return pulumi.get(self, "config_map_key_ref")
@config_map_key_ref.setter
def config_map_key_ref(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvValueFromConfigMapKeyRefArgs']]):
pulumi.set(self, "config_map_key_ref", value)
@property
@pulumi.getter(name="fieldRef")
def field_ref(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvValueFromFieldRefArgs']]:
"""
Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
"""
return pulumi.get(self, "field_ref")
@field_ref.setter
def field_ref(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvValueFromFieldRefArgs']]):
pulumi.set(self, "field_ref", value)
@property
@pulumi.getter(name="resourceFieldRef")
def resource_field_ref(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvValueFromResourceFieldRefArgs']]:
"""
        Selects a resource of the container: only resource limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
"""
return pulumi.get(self, "resource_field_ref")
@resource_field_ref.setter
def resource_field_ref(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvValueFromResourceFieldRefArgs']]):
pulumi.set(self, "resource_field_ref", value)
@property
@pulumi.getter(name="secretKeyRef")
def secret_key_ref(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvValueFromSecretKeyRefArgs']]:
"""
Selects a key of a secret in the pod's namespace
"""
return pulumi.get(self, "secret_key_ref")
@secret_key_ref.setter
def secret_key_ref(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvValueFromSecretKeyRefArgs']]):
pulumi.set(self, "secret_key_ref", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvValueFromConfigMapKeyRefArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
name: Optional[pulumi.Input[str]] = None,
optional: Optional[pulumi.Input[bool]] = None):
"""
Selects a key of a ConfigMap.
:param pulumi.Input[str] key: The key to select.
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
:param pulumi.Input[bool] optional: Specify whether the ConfigMap or its key must be defined
"""
pulumi.set(__self__, "key", key)
if name is not None:
pulumi.set(__self__, "name", name)
if optional is not None:
pulumi.set(__self__, "optional", optional)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The key to select.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def optional(self) -> Optional[pulumi.Input[bool]]:
"""
Specify whether the ConfigMap or its key must be defined
"""
return pulumi.get(self, "optional")
@optional.setter
def optional(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "optional", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvValueFromFieldRefArgs:
def __init__(__self__, *,
field_path: pulumi.Input[str],
api_version: Optional[pulumi.Input[str]] = None):
"""
Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
:param pulumi.Input[str] field_path: Path of the field to select in the specified API version.
:param pulumi.Input[str] api_version: Version of the schema the FieldPath is written in terms of, defaults to "v1".
"""
pulumi.set(__self__, "field_path", field_path)
if api_version is not None:
pulumi.set(__self__, "api_version", api_version)
@property
@pulumi.getter(name="fieldPath")
def field_path(self) -> pulumi.Input[str]:
"""
Path of the field to select in the specified API version.
"""
return pulumi.get(self, "field_path")
@field_path.setter
def field_path(self, value: pulumi.Input[str]):
pulumi.set(self, "field_path", value)
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> Optional[pulumi.Input[str]]:
"""
Version of the schema the FieldPath is written in terms of, defaults to "v1".
"""
return pulumi.get(self, "api_version")
@api_version.setter
def api_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_version", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvValueFromResourceFieldRefArgs:
def __init__(__self__, *,
resource: pulumi.Input[str],
container_name: Optional[pulumi.Input[str]] = None,
divisor: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvValueFromResourceFieldRefDivisorArgs']] = None):
"""
        Selects a resource of the container: only resource limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
:param pulumi.Input[str] resource: Required: resource to select
:param pulumi.Input[str] container_name: Container name: required for volumes, optional for env vars
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvValueFromResourceFieldRefDivisorArgs'] divisor: Specifies the output format of the exposed resources, defaults to "1"
"""
pulumi.set(__self__, "resource", resource)
if container_name is not None:
pulumi.set(__self__, "container_name", container_name)
if divisor is not None:
pulumi.set(__self__, "divisor", divisor)
@property
@pulumi.getter
def resource(self) -> pulumi.Input[str]:
"""
Required: resource to select
"""
return pulumi.get(self, "resource")
@resource.setter
def resource(self, value: pulumi.Input[str]):
pulumi.set(self, "resource", value)
@property
@pulumi.getter(name="containerName")
def container_name(self) -> Optional[pulumi.Input[str]]:
"""
Container name: required for volumes, optional for env vars
"""
return pulumi.get(self, "container_name")
@container_name.setter
def container_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "container_name", value)
@property
@pulumi.getter
def divisor(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvValueFromResourceFieldRefDivisorArgs']]:
"""
Specifies the output format of the exposed resources, defaults to "1"
"""
return pulumi.get(self, "divisor")
@divisor.setter
def divisor(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvValueFromResourceFieldRefDivisorArgs']]):
pulumi.set(self, "divisor", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvValueFromResourceFieldRefDivisorArgs:
    def __init__(__self__):
        # Empty placeholder emitted by the code generator: the CRD schema
        # leaves `divisor` untyped (it is a Kubernetes resource quantity), so
        # no properties are generated for it.
        pass
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersEnvValueFromSecretKeyRefArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
name: Optional[pulumi.Input[str]] = None,
optional: Optional[pulumi.Input[bool]] = None):
"""
Selects a key of a secret in the pod's namespace
:param pulumi.Input[str] key: The key of the secret to select from. Must be a valid secret key.
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
:param pulumi.Input[bool] optional: Specify whether the Secret or its key must be defined
"""
pulumi.set(__self__, "key", key)
if name is not None:
pulumi.set(__self__, "name", name)
if optional is not None:
pulumi.set(__self__, "optional", optional)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The key of the secret to select from. Must be a valid secret key.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def optional(self) -> Optional[pulumi.Input[bool]]:
"""
Specify whether the Secret or its key must be defined
"""
return pulumi.get(self, "optional")
@optional.setter
def optional(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "optional", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecycleArgs:
def __init__(__self__, *,
post_start: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartArgs']] = None,
pre_stop: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopArgs']] = None):
"""
Lifecycle is not allowed for ephemeral containers.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartArgs'] post_start: PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
        :param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopArgs'] pre_stop: PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
"""
if post_start is not None:
pulumi.set(__self__, "post_start", post_start)
if pre_stop is not None:
pulumi.set(__self__, "pre_stop", pre_stop)
@property
@pulumi.getter(name="postStart")
def post_start(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartArgs']]:
"""
PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
"""
return pulumi.get(self, "post_start")
@post_start.setter
def post_start(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartArgs']]):
pulumi.set(self, "post_start", value)
@property
@pulumi.getter(name="preStop")
def pre_stop(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopArgs']]:
"""
        PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
"""
return pulumi.get(self, "pre_stop")
@pre_stop.setter
def pre_stop(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopArgs']]):
pulumi.set(self, "pre_stop", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartArgs:
def __init__(__self__, *,
exec_: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartExecArgs']] = None,
http_get: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartHttpGetArgs']] = None,
tcp_socket: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartTcpSocketArgs']] = None):
"""
PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartExecArgs'] exec_: One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartHttpGetArgs'] http_get: HTTPGet specifies the http request to perform.
        :param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartTcpSocketArgs'] tcp_socket: TCPSocket specifies an action involving a TCP port. TCP hooks are not yet supported. TODO: implement a realistic TCP lifecycle hook.
"""
if exec_ is not None:
pulumi.set(__self__, "exec_", exec_)
if http_get is not None:
pulumi.set(__self__, "http_get", http_get)
if tcp_socket is not None:
pulumi.set(__self__, "tcp_socket", tcp_socket)
@property
@pulumi.getter(name="exec")
def exec_(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartExecArgs']]:
"""
One and only one of the following should be specified. Exec specifies the action to take.
"""
return pulumi.get(self, "exec_")
@exec_.setter
def exec_(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartExecArgs']]):
pulumi.set(self, "exec_", value)
@property
@pulumi.getter(name="httpGet")
def http_get(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartHttpGetArgs']]:
"""
HTTPGet specifies the http request to perform.
"""
return pulumi.get(self, "http_get")
@http_get.setter
def http_get(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartHttpGetArgs']]):
pulumi.set(self, "http_get", value)
@property
@pulumi.getter(name="tcpSocket")
def tcp_socket(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartTcpSocketArgs']]:
"""
        TCPSocket specifies an action involving a TCP port. TCP hooks are not yet supported. TODO: implement a realistic TCP lifecycle hook.
"""
return pulumi.get(self, "tcp_socket")
@tcp_socket.setter
def tcp_socket(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartTcpSocketArgs']]):
pulumi.set(self, "tcp_socket", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartExecArgs:
def __init__(__self__, *,
command: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
One and only one of the following should be specified. Exec specifies the action to take.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] command: Command is the command line to execute inside the container; the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd; it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
if command is not None:
pulumi.set(__self__, "command", command)
@property
@pulumi.getter
def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
        Command is the command line to execute inside the container; the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd; it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "command", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartHttpGetArgs:
def __init__(__self__, *,
port: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartHttpGetPortArgs'],
host: Optional[pulumi.Input[str]] = None,
http_headers: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartHttpGetHttpHeadersArgs']]]] = None,
path: Optional[pulumi.Input[str]] = None,
scheme: Optional[pulumi.Input[str]] = None):
"""
HTTPGet specifies the http request to perform.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartHttpGetPortArgs'] port: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartHttpGetHttpHeadersArgs']]] http_headers: Custom headers to set in the request. HTTP allows repeated headers.
:param pulumi.Input[str] path: Path to access on the HTTP server.
:param pulumi.Input[str] scheme: Scheme to use for connecting to the host. Defaults to HTTP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
if http_headers is not None:
pulumi.set(__self__, "http_headers", http_headers)
if path is not None:
pulumi.set(__self__, "path", path)
if scheme is not None:
pulumi.set(__self__, "scheme", scheme)
@property
@pulumi.getter
def port(self) -> pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartHttpGetPortArgs']:
"""
Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartHttpGetPortArgs']):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@property
@pulumi.getter(name="httpHeaders")
def http_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartHttpGetHttpHeadersArgs']]]]:
"""
Custom headers to set in the request. HTTP allows repeated headers.
"""
return pulumi.get(self, "http_headers")
@http_headers.setter
def http_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartHttpGetHttpHeadersArgs']]]]):
pulumi.set(self, "http_headers", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
Path to access on the HTTP server.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def scheme(self) -> Optional[pulumi.Input[str]]:
"""
Scheme to use for connecting to the host. Defaults to HTTP.
"""
return pulumi.get(self, "scheme")
@scheme.setter
def scheme(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scheme", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartHttpGetHttpHeadersArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
"""
HTTPHeader describes a custom header to be used in HTTP probes
:param pulumi.Input[str] name: The header field name
:param pulumi.Input[str] value: The header field value
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The header field name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The header field value
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartHttpGetPortArgs:
    def __init__(__self__):
        # Empty placeholder emitted by the code generator: the CRD schema
        # leaves `port` untyped (Kubernetes models it as int-or-string), so no
        # properties are generated for it.
        pass
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartTcpSocketArgs:
def __init__(__self__, *,
port: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartTcpSocketPortArgs'],
host: Optional[pulumi.Input[str]] = None):
"""
        TCPSocket specifies an action involving a TCP port. TCP hooks are not yet supported. TODO: implement a realistic TCP lifecycle hook.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartTcpSocketPortArgs'] port: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Optional: Host name to connect to, defaults to the pod IP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
@property
@pulumi.getter
def port(self) -> pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartTcpSocketPortArgs']:
"""
Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartTcpSocketPortArgs']):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Optional: Host name to connect to, defaults to the pod IP.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePostStartTcpSocketPortArgs:
    def __init__(__self__):
        # Empty placeholder emitted by the code generator: the CRD schema
        # leaves `port` untyped (Kubernetes models it as int-or-string), so no
        # properties are generated for it.
        pass
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopArgs:
def __init__(__self__, *,
exec_: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopExecArgs']] = None,
http_get: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopHttpGetArgs']] = None,
tcp_socket: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopTcpSocketArgs']] = None):
"""
        PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopExecArgs'] exec_: One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopHttpGetArgs'] http_get: HTTPGet specifies the http request to perform.
        :param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopTcpSocketArgs'] tcp_socket: TCPSocket specifies an action involving a TCP port. TCP hooks are not yet supported. TODO: implement a realistic TCP lifecycle hook.
"""
if exec_ is not None:
pulumi.set(__self__, "exec_", exec_)
if http_get is not None:
pulumi.set(__self__, "http_get", http_get)
if tcp_socket is not None:
pulumi.set(__self__, "tcp_socket", tcp_socket)
@property
@pulumi.getter(name="exec")
def exec_(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopExecArgs']]:
"""
One and only one of the following should be specified. Exec specifies the action to take.
"""
return pulumi.get(self, "exec_")
@exec_.setter
def exec_(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopExecArgs']]):
pulumi.set(self, "exec_", value)
@property
@pulumi.getter(name="httpGet")
def http_get(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopHttpGetArgs']]:
"""
HTTPGet specifies the http request to perform.
"""
return pulumi.get(self, "http_get")
@http_get.setter
def http_get(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopHttpGetArgs']]):
pulumi.set(self, "http_get", value)
@property
@pulumi.getter(name="tcpSocket")
def tcp_socket(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopTcpSocketArgs']]:
"""
        TCPSocket specifies an action involving a TCP port. TCP hooks are not yet supported. TODO: implement a realistic TCP lifecycle hook.
"""
return pulumi.get(self, "tcp_socket")
@tcp_socket.setter
def tcp_socket(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopTcpSocketArgs']]):
pulumi.set(self, "tcp_socket", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopExecArgs:
def __init__(__self__, *,
command: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
One and only one of the following should be specified. Exec specifies the action to take.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] command: Command is the command line to execute inside the container; the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd; it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
if command is not None:
pulumi.set(__self__, "command", command)
@property
@pulumi.getter
def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
        Command is the command line to execute inside the container; the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd; it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "command", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopHttpGetArgs:
def __init__(__self__, *,
port: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopHttpGetPortArgs'],
host: Optional[pulumi.Input[str]] = None,
http_headers: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopHttpGetHttpHeadersArgs']]]] = None,
path: Optional[pulumi.Input[str]] = None,
scheme: Optional[pulumi.Input[str]] = None):
"""
HTTPGet specifies the http request to perform.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopHttpGetPortArgs'] port: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopHttpGetHttpHeadersArgs']]] http_headers: Custom headers to set in the request. HTTP allows repeated headers.
:param pulumi.Input[str] path: Path to access on the HTTP server.
:param pulumi.Input[str] scheme: Scheme to use for connecting to the host. Defaults to HTTP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
if http_headers is not None:
pulumi.set(__self__, "http_headers", http_headers)
if path is not None:
pulumi.set(__self__, "path", path)
if scheme is not None:
pulumi.set(__self__, "scheme", scheme)
@property
@pulumi.getter
def port(self) -> pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopHttpGetPortArgs']:
"""
Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopHttpGetPortArgs']):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@property
@pulumi.getter(name="httpHeaders")
def http_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopHttpGetHttpHeadersArgs']]]]:
"""
Custom headers to set in the request. HTTP allows repeated headers.
"""
return pulumi.get(self, "http_headers")
@http_headers.setter
def http_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopHttpGetHttpHeadersArgs']]]]):
pulumi.set(self, "http_headers", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
Path to access on the HTTP server.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def scheme(self) -> Optional[pulumi.Input[str]]:
"""
Scheme to use for connecting to the host. Defaults to HTTP.
"""
return pulumi.get(self, "scheme")
@scheme.setter
def scheme(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scheme", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopHttpGetHttpHeadersArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
"""
HTTPHeader describes a custom header to be used in HTTP probes.
:param pulumi.Input[str] name: The header field name
:param pulumi.Input[str] value: The header field value
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The header field name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The header field value
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopHttpGetPortArgs:
def __init__(__self__):
pass
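# Illustrative sketch (not part of the generated API): building a preStop
# httpGet hook with a custom header. The path, scheme, and header values are
# hypothetical. Note that the *HttpGetPortArgs wrapper above is generated with
# no fields, so the port number itself typically travels in the raw
# SeldonDeployment manifest rather than through this class.
def _example_pre_stop_http_get() -> 'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopHttpGetArgs':
    return SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopHttpGetArgs(
        port=SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopHttpGetPortArgs(),
        path="/drain",
        scheme="HTTP",
        http_headers=[
            SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopHttpGetHttpHeadersArgs(
                name="X-Shutdown-Reason", value="prestop"),
        ])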
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopTcpSocketArgs:
def __init__(__self__, *,
port: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopTcpSocketPortArgs'],
host: Optional[pulumi.Input[str]] = None):
"""
TCPSocket specifies an action involving a TCP port. TCP hooks are not yet supported. TODO: implement a realistic TCP lifecycle hook.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopTcpSocketPortArgs'] port: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Optional: Host name to connect to, defaults to the pod IP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
@property
@pulumi.getter
def port(self) -> pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopTcpSocketPortArgs']:
"""
Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopTcpSocketPortArgs']):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Optional: Host name to connect to, defaults to the pod IP.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopTcpSocketPortArgs:
def __init__(__self__):
pass
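# Illustrative sketch (not part of the generated API): building a preStop
# tcpSocket action. Host is omitted so it defaults to the pod IP. As the
# docstrings above note, TCP lifecycle hooks are not yet supported upstream,
# so this shape is shown for completeness only.
def _example_pre_stop_tcp_socket() -> 'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopTcpSocketArgs':
    return SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopTcpSocketArgs(
        port=SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLifecyclePreStopTcpSocketPortArgs())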
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeArgs:
def __init__(__self__, *,
exec_: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeExecArgs']] = None,
failure_threshold: Optional[pulumi.Input[int]] = None,
http_get: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeHttpGetArgs']] = None,
initial_delay_seconds: Optional[pulumi.Input[int]] = None,
period_seconds: Optional[pulumi.Input[int]] = None,
success_threshold: Optional[pulumi.Input[int]] = None,
tcp_socket: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeTcpSocketArgs']] = None,
timeout_seconds: Optional[pulumi.Input[int]] = None):
"""
Probes are not allowed for ephemeral containers.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeExecArgs'] exec_: One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[int] failure_threshold: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeHttpGetArgs'] http_get: HTTPGet specifies the http request to perform.
:param pulumi.Input[int] initial_delay_seconds: Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input[int] period_seconds: How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
:param pulumi.Input[int] success_threshold: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeTcpSocketArgs'] tcp_socket: TCPSocket specifies an action involving a TCP port. TCP hooks are not yet supported. TODO: implement a realistic TCP lifecycle hook.
:param pulumi.Input[int] timeout_seconds: Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
if exec_ is not None:
pulumi.set(__self__, "exec_", exec_)
if failure_threshold is not None:
pulumi.set(__self__, "failure_threshold", failure_threshold)
if http_get is not None:
pulumi.set(__self__, "http_get", http_get)
if initial_delay_seconds is not None:
pulumi.set(__self__, "initial_delay_seconds", initial_delay_seconds)
if period_seconds is not None:
pulumi.set(__self__, "period_seconds", period_seconds)
if success_threshold is not None:
pulumi.set(__self__, "success_threshold", success_threshold)
if tcp_socket is not None:
pulumi.set(__self__, "tcp_socket", tcp_socket)
if timeout_seconds is not None:
pulumi.set(__self__, "timeout_seconds", timeout_seconds)
@property
@pulumi.getter(name="exec")
def exec_(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeExecArgs']]:
"""
One and only one of the following should be specified. Exec specifies the action to take.
"""
return pulumi.get(self, "exec_")
@exec_.setter
def exec_(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeExecArgs']]):
pulumi.set(self, "exec_", value)
@property
@pulumi.getter(name="failureThreshold")
def failure_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
"""
return pulumi.get(self, "failure_threshold")
@failure_threshold.setter
def failure_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "failure_threshold", value)
@property
@pulumi.getter(name="httpGet")
def http_get(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeHttpGetArgs']]:
"""
HTTPGet specifies the http request to perform.
"""
return pulumi.get(self, "http_get")
@http_get.setter
def http_get(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeHttpGetArgs']]):
pulumi.set(self, "http_get", value)
@property
@pulumi.getter(name="initialDelaySeconds")
def initial_delay_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "initial_delay_seconds")
@initial_delay_seconds.setter
def initial_delay_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "initial_delay_seconds", value)
@property
@pulumi.getter(name="periodSeconds")
def period_seconds(self) -> Optional[pulumi.Input[int]]:
"""
How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
"""
return pulumi.get(self, "period_seconds")
@period_seconds.setter
def period_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "period_seconds", value)
@property
@pulumi.getter(name="successThreshold")
def success_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
"""
return pulumi.get(self, "success_threshold")
@success_threshold.setter
def success_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "success_threshold", value)
@property
@pulumi.getter(name="tcpSocket")
def tcp_socket(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeTcpSocketArgs']]:
"""
TCPSocket specifies an action involving a TCP port. TCP hooks are not yet supported. TODO: implement a realistic TCP lifecycle hook.
"""
return pulumi.get(self, "tcp_socket")
@tcp_socket.setter
def tcp_socket(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeTcpSocketArgs']]):
pulumi.set(self, "tcp_socket", value)
@property
@pulumi.getter(name="timeoutSeconds")
def timeout_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "timeout_seconds")
@timeout_seconds.setter
def timeout_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "timeout_seconds", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeExecArgs:
def __init__(__self__, *,
command: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
if command is not None:
pulumi.set(__self__, "command", command)
@property
@pulumi.getter
def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "command", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeHttpGetArgs:
def __init__(__self__, *,
port: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeHttpGetPortArgs'],
host: Optional[pulumi.Input[str]] = None,
http_headers: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeHttpGetHttpHeadersArgs']]]] = None,
path: Optional[pulumi.Input[str]] = None,
scheme: Optional[pulumi.Input[str]] = None):
"""
HTTPGet specifies the http request to perform.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeHttpGetPortArgs'] port: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeHttpGetHttpHeadersArgs']]] http_headers: Custom headers to set in the request. HTTP allows repeated headers.
:param pulumi.Input[str] path: Path to access on the HTTP server.
:param pulumi.Input[str] scheme: Scheme to use for connecting to the host. Defaults to HTTP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
if http_headers is not None:
pulumi.set(__self__, "http_headers", http_headers)
if path is not None:
pulumi.set(__self__, "path", path)
if scheme is not None:
pulumi.set(__self__, "scheme", scheme)
@property
@pulumi.getter
def port(self) -> pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeHttpGetPortArgs']:
"""
Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeHttpGetPortArgs']):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@property
@pulumi.getter(name="httpHeaders")
def http_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeHttpGetHttpHeadersArgs']]]]:
"""
Custom headers to set in the request. HTTP allows repeated headers.
"""
return pulumi.get(self, "http_headers")
@http_headers.setter
def http_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeHttpGetHttpHeadersArgs']]]]):
pulumi.set(self, "http_headers", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
Path to access on the HTTP server.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def scheme(self) -> Optional[pulumi.Input[str]]:
"""
Scheme to use for connecting to the host. Defaults to HTTP.
"""
return pulumi.get(self, "scheme")
@scheme.setter
def scheme(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scheme", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeHttpGetHttpHeadersArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
"""
HTTPHeader describes a custom header to be used in HTTP probes.
:param pulumi.Input[str] name: The header field name
:param pulumi.Input[str] value: The header field value
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The header field name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The header field value
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeHttpGetPortArgs:
def __init__(__self__):
pass
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeTcpSocketArgs:
def __init__(__self__, *,
port: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeTcpSocketPortArgs'],
host: Optional[pulumi.Input[str]] = None):
"""
TCPSocket specifies an action involving a TCP port. TCP hooks are not yet supported. TODO: implement a realistic TCP lifecycle hook.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeTcpSocketPortArgs'] port: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Optional: Host name to connect to, defaults to the pod IP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
@property
@pulumi.getter
def port(self) -> pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeTcpSocketPortArgs']:
"""
Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeTcpSocketPortArgs']):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Optional: Host name to connect to, defaults to the pod IP.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeTcpSocketPortArgs:
def __init__(__self__):
pass
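# Illustrative sketch (not part of the generated API): an HTTP liveness probe
# that makes the documented defaults explicit (period 10s, failure threshold 3,
# success threshold 1, which is mandatory for liveness). The /healthz path and
# 5-second initial delay are hypothetical. Note the class docstring above:
# probes are not allowed for ephemeral containers, so this shape is only
# meaningful on regular containers.
def _example_liveness_probe() -> 'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeArgs':
    return SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeArgs(
        http_get=SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeHttpGetArgs(
            port=SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersLivenessProbeHttpGetPortArgs(),
            path="/healthz"),
        initial_delay_seconds=5,
        period_seconds=10,
        failure_threshold=3,
        success_threshold=1)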
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersPortsArgs:
def __init__(__self__, *,
container_port: pulumi.Input[int],
host_ip: Optional[pulumi.Input[str]] = None,
host_port: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input[str]] = None):
"""
ContainerPort represents a network port in a single container.
:param pulumi.Input[int] container_port: Number of the port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.
:param pulumi.Input[str] host_ip: The host IP to bind the external port to.
:param pulumi.Input[int] host_port: Number of the port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.
:param pulumi.Input[str] name: Name for the port, which can be referred to by services. If specified, this must be an IANA_SVC_NAME and unique within the pod; each named port in a pod must have a unique name.
:param pulumi.Input[str] protocol: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP".
"""
pulumi.set(__self__, "container_port", container_port)
if host_ip is not None:
pulumi.set(__self__, "host_ip", host_ip)
if host_port is not None:
pulumi.set(__self__, "host_port", host_port)
if name is not None:
pulumi.set(__self__, "name", name)
if protocol is not None:
pulumi.set(__self__, "protocol", protocol)
@property
@pulumi.getter(name="containerPort")
def container_port(self) -> pulumi.Input[int]:
"""
Number of the port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.
"""
return pulumi.get(self, "container_port")
@container_port.setter
def container_port(self, value: pulumi.Input[int]):
pulumi.set(self, "container_port", value)
@property
@pulumi.getter(name="hostIP")
def host_ip(self) -> Optional[pulumi.Input[str]]:
"""
The host IP to bind the external port to.
"""
return pulumi.get(self, "host_ip")
@host_ip.setter
def host_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host_ip", value)
@property
@pulumi.getter(name="hostPort")
def host_port(self) -> Optional[pulumi.Input[int]]:
"""
Number of the port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.
"""
return pulumi.get(self, "host_port")
@host_port.setter
def host_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "host_port", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name for the port, which can be referred to by services. If specified, this must be an IANA_SVC_NAME and unique within the pod; each named port in a pod must have a unique name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def protocol(self) -> Optional[pulumi.Input[str]]:
"""
Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP".
"""
return pulumi.get(self, "protocol")
@protocol.setter
def protocol(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "protocol", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeArgs:
def __init__(__self__, *,
exec_: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeExecArgs']] = None,
failure_threshold: Optional[pulumi.Input[int]] = None,
http_get: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeHttpGetArgs']] = None,
initial_delay_seconds: Optional[pulumi.Input[int]] = None,
period_seconds: Optional[pulumi.Input[int]] = None,
success_threshold: Optional[pulumi.Input[int]] = None,
tcp_socket: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeTcpSocketArgs']] = None,
timeout_seconds: Optional[pulumi.Input[int]] = None):
"""
Probes are not allowed for ephemeral containers.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeExecArgs'] exec_: One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[int] failure_threshold: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeHttpGetArgs'] http_get: HTTPGet specifies the http request to perform.
:param pulumi.Input[int] initial_delay_seconds: Number of seconds after the container has started before readiness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input[int] period_seconds: How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
:param pulumi.Input[int] success_threshold: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeTcpSocketArgs'] tcp_socket: TCPSocket specifies an action involving a TCP port. TCP hooks are not yet supported. TODO: implement a realistic TCP lifecycle hook.
:param pulumi.Input[int] timeout_seconds: Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
if exec_ is not None:
pulumi.set(__self__, "exec_", exec_)
if failure_threshold is not None:
pulumi.set(__self__, "failure_threshold", failure_threshold)
if http_get is not None:
pulumi.set(__self__, "http_get", http_get)
if initial_delay_seconds is not None:
pulumi.set(__self__, "initial_delay_seconds", initial_delay_seconds)
if period_seconds is not None:
pulumi.set(__self__, "period_seconds", period_seconds)
if success_threshold is not None:
pulumi.set(__self__, "success_threshold", success_threshold)
if tcp_socket is not None:
pulumi.set(__self__, "tcp_socket", tcp_socket)
if timeout_seconds is not None:
pulumi.set(__self__, "timeout_seconds", timeout_seconds)
@property
@pulumi.getter(name="exec")
def exec_(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeExecArgs']]:
"""
One and only one of the following should be specified. Exec specifies the action to take.
"""
return pulumi.get(self, "exec_")
@exec_.setter
def exec_(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeExecArgs']]):
pulumi.set(self, "exec_", value)
@property
@pulumi.getter(name="failureThreshold")
def failure_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
"""
return pulumi.get(self, "failure_threshold")
@failure_threshold.setter
def failure_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "failure_threshold", value)
@property
@pulumi.getter(name="httpGet")
def http_get(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeHttpGetArgs']]:
"""
HTTPGet specifies the http request to perform.
"""
return pulumi.get(self, "http_get")
@http_get.setter
def http_get(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeHttpGetArgs']]):
pulumi.set(self, "http_get", value)
@property
@pulumi.getter(name="initialDelaySeconds")
def initial_delay_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Number of seconds after the container has started before readiness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "initial_delay_seconds")
@initial_delay_seconds.setter
def initial_delay_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "initial_delay_seconds", value)
@property
@pulumi.getter(name="periodSeconds")
def period_seconds(self) -> Optional[pulumi.Input[int]]:
"""
How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
"""
return pulumi.get(self, "period_seconds")
@period_seconds.setter
def period_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "period_seconds", value)
@property
@pulumi.getter(name="successThreshold")
def success_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
"""
return pulumi.get(self, "success_threshold")
@success_threshold.setter
def success_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "success_threshold", value)
@property
@pulumi.getter(name="tcpSocket")
def tcp_socket(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeTcpSocketArgs']]:
"""
TCPSocket specifies an action involving a TCP port. TCP hooks are not yet supported. TODO: implement a realistic TCP lifecycle hook.
"""
return pulumi.get(self, "tcp_socket")
@tcp_socket.setter
def tcp_socket(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeTcpSocketArgs']]):
pulumi.set(self, "tcp_socket", value)
@property
@pulumi.getter(name="timeoutSeconds")
def timeout_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "timeout_seconds")
@timeout_seconds.setter
def timeout_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "timeout_seconds", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeExecArgs:
def __init__(__self__, *,
command: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
if command is not None:
pulumi.set(__self__, "command", command)
@property
@pulumi.getter
def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "command", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeHttpGetArgs:
def __init__(__self__, *,
port: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeHttpGetPortArgs'],
host: Optional[pulumi.Input[str]] = None,
http_headers: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeHttpGetHttpHeadersArgs']]]] = None,
path: Optional[pulumi.Input[str]] = None,
scheme: Optional[pulumi.Input[str]] = None):
"""
HTTPGet specifies the http request to perform.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeHttpGetPortArgs'] port: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeHttpGetHttpHeadersArgs']]] http_headers: Custom headers to set in the request. HTTP allows repeated headers.
:param pulumi.Input[str] path: Path to access on the HTTP server.
:param pulumi.Input[str] scheme: Scheme to use for connecting to the host. Defaults to HTTP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
if http_headers is not None:
pulumi.set(__self__, "http_headers", http_headers)
if path is not None:
pulumi.set(__self__, "path", path)
if scheme is not None:
pulumi.set(__self__, "scheme", scheme)
@property
@pulumi.getter
def port(self) -> pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeHttpGetPortArgs']:
"""
Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeHttpGetPortArgs']):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@property
@pulumi.getter(name="httpHeaders")
def http_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeHttpGetHttpHeadersArgs']]]]:
"""
Custom headers to set in the request. HTTP allows repeated headers.
"""
return pulumi.get(self, "http_headers")
@http_headers.setter
def http_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeHttpGetHttpHeadersArgs']]]]):
pulumi.set(self, "http_headers", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
Path to access on the HTTP server.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def scheme(self) -> Optional[pulumi.Input[str]]:
"""
Scheme to use for connecting to the host. Defaults to HTTP.
"""
return pulumi.get(self, "scheme")
@scheme.setter
def scheme(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scheme", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeHttpGetHttpHeadersArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
"""
HTTPHeader describes a custom header to be used in HTTP probes.
:param pulumi.Input[str] name: The header field name
:param pulumi.Input[str] value: The header field value
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The header field name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The header field value
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeHttpGetPortArgs:
def __init__(__self__):
pass
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeTcpSocketArgs:
def __init__(__self__, *,
port: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeTcpSocketPortArgs'],
host: Optional[pulumi.Input[str]] = None):
"""
TCPSocket specifies an action involving a TCP port. TCP hooks are not yet supported. TODO: implement a realistic TCP lifecycle hook.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeTcpSocketPortArgs'] port: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Optional: Host name to connect to, defaults to the pod IP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
@property
@pulumi.getter
def port(self) -> pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeTcpSocketPortArgs']:
"""
Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeTcpSocketPortArgs']):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Optional: Host name to connect to, defaults to the pod IP.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersReadinessProbeTcpSocketPortArgs:
def __init__(__self__):
pass
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersResourcesArgs:
def __init__(__self__, *,
limits: Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersResourcesLimitsArgs']]]] = None,
requests: Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersResourcesRequestsArgs']]]] = None):
"""
Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.
:param pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersResourcesLimitsArgs']]] limits: Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
:param pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersResourcesRequestsArgs']]] requests: Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
if limits is not None:
pulumi.set(__self__, "limits", limits)
if requests is not None:
pulumi.set(__self__, "requests", requests)
@property
@pulumi.getter
def limits(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersResourcesLimitsArgs']]]]:
"""
Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
return pulumi.get(self, "limits")
@limits.setter
def limits(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersResourcesLimitsArgs']]]]):
pulumi.set(self, "limits", value)
@property
@pulumi.getter
def requests(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersResourcesRequestsArgs']]]]:
"""
Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
return pulumi.get(self, "requests")
@requests.setter
def requests(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersResourcesRequestsArgs']]]]):
pulumi.set(self, "requests", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersResourcesLimitsArgs:
def __init__(__self__):
pass
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersResourcesRequestsArgs:
def __init__(__self__):
pass
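# Illustrative sketch (not part of the generated API): composing the resources
# block. The Limits/Requests wrapper classes above are generated with no
# fields, so concrete quantities (e.g. cpu "100m", memory "256Mi") normally
# travel in the raw SeldonDeployment manifest; this only shows how the empty
# wrappers fit the Mapping-typed parameters.
def _example_resources() -> 'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersResourcesArgs':
    return SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersResourcesArgs(
        limits={"cpu": SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersResourcesLimitsArgs()},
        requests={"memory": SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersResourcesRequestsArgs()})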
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersSecurityContextArgs:
def __init__(__self__, *,
allow_privilege_escalation: Optional[pulumi.Input[bool]] = None,
capabilities: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersSecurityContextCapabilitiesArgs']] = None,
privileged: Optional[pulumi.Input[bool]] = None,
proc_mount: Optional[pulumi.Input[str]] = None,
read_only_root_filesystem: Optional[pulumi.Input[bool]] = None,
run_as_group: Optional[pulumi.Input[int]] = None,
run_as_non_root: Optional[pulumi.Input[bool]] = None,
run_as_user: Optional[pulumi.Input[int]] = None,
se_linux_options: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersSecurityContextSeLinuxOptionsArgs']] = None,
windows_options: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersSecurityContextWindowsOptionsArgs']] = None):
"""
SecurityContext is not allowed for ephemeral containers.
:param pulumi.Input[bool] allow_privilege_escalation: AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersSecurityContextCapabilitiesArgs'] capabilities: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.
:param pulumi.Input[bool] privileged: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.
:param pulumi.Input[str] proc_mount: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled.
:param pulumi.Input[bool] read_only_root_filesystem: Whether this container has a read-only root filesystem. Default is false.
:param pulumi.Input[int] run_as_group: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param pulumi.Input[bool] run_as_non_root: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param pulumi.Input[int] run_as_user: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersSecurityContextSeLinuxOptionsArgs'] se_linux_options: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersSecurityContextWindowsOptionsArgs'] windows_options: The Windows-specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
if allow_privilege_escalation is not None:
pulumi.set(__self__, "allow_privilege_escalation", allow_privilege_escalation)
if capabilities is not None:
pulumi.set(__self__, "capabilities", capabilities)
if privileged is not None:
pulumi.set(__self__, "privileged", privileged)
if proc_mount is not None:
pulumi.set(__self__, "proc_mount", proc_mount)
if read_only_root_filesystem is not None:
pulumi.set(__self__, "read_only_root_filesystem", read_only_root_filesystem)
if run_as_group is not None:
pulumi.set(__self__, "run_as_group", run_as_group)
if run_as_non_root is not None:
pulumi.set(__self__, "run_as_non_root", run_as_non_root)
if run_as_user is not None:
pulumi.set(__self__, "run_as_user", run_as_user)
if se_linux_options is not None:
pulumi.set(__self__, "se_linux_options", se_linux_options)
if windows_options is not None:
pulumi.set(__self__, "windows_options", windows_options)
@property
@pulumi.getter(name="allowPrivilegeEscalation")
def allow_privilege_escalation(self) -> Optional[pulumi.Input[bool]]:
"""
AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN
"""
return pulumi.get(self, "allow_privilege_escalation")
@allow_privilege_escalation.setter
def allow_privilege_escalation(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "allow_privilege_escalation", value)
@property
@pulumi.getter
def capabilities(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersSecurityContextCapabilitiesArgs']]:
"""
The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.
"""
return pulumi.get(self, "capabilities")
@capabilities.setter
def capabilities(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersSecurityContextCapabilitiesArgs']]):
pulumi.set(self, "capabilities", value)
@property
@pulumi.getter
def privileged(self) -> Optional[pulumi.Input[bool]]:
"""
Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.
"""
return pulumi.get(self, "privileged")
@privileged.setter
def privileged(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "privileged", value)
@property
@pulumi.getter(name="procMount")
def proc_mount(self) -> Optional[pulumi.Input[str]]:
"""
procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled.
"""
return pulumi.get(self, "proc_mount")
@proc_mount.setter
def proc_mount(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "proc_mount", value)
@property
@pulumi.getter(name="readOnlyRootFilesystem")
def read_only_root_filesystem(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this container has a read-only root filesystem. Default is false.
"""
return pulumi.get(self, "read_only_root_filesystem")
@read_only_root_filesystem.setter
def read_only_root_filesystem(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "read_only_root_filesystem", value)
@property
@pulumi.getter(name="runAsGroup")
def run_as_group(self) -> Optional[pulumi.Input[int]]:
"""
The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "run_as_group")
@run_as_group.setter
def run_as_group(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "run_as_group", value)
@property
@pulumi.getter(name="runAsNonRoot")
def run_as_non_root(self) -> Optional[pulumi.Input[bool]]:
"""
Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "run_as_non_root")
@run_as_non_root.setter
def run_as_non_root(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "run_as_non_root", value)
@property
@pulumi.getter(name="runAsUser")
def run_as_user(self) -> Optional[pulumi.Input[int]]:
"""
The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "run_as_user")
@run_as_user.setter
def run_as_user(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "run_as_user", value)
@property
@pulumi.getter(name="seLinuxOptions")
def se_linux_options(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersSecurityContextSeLinuxOptionsArgs']]:
"""
The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "se_linux_options")
@se_linux_options.setter
def se_linux_options(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersSecurityContextSeLinuxOptionsArgs']]):
pulumi.set(self, "se_linux_options", value)
@property
@pulumi.getter(name="windowsOptions")
def windows_options(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersSecurityContextWindowsOptionsArgs']]:
"""
The Windows-specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "windows_options")
@windows_options.setter
def windows_options(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersSecurityContextWindowsOptionsArgs']]):
pulumi.set(self, "windows_options", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersSecurityContextCapabilitiesArgs:
def __init__(__self__, *,
add: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
drop: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.
:param pulumi.Input[Sequence[pulumi.Input[str]]] add: Added capabilities
:param pulumi.Input[Sequence[pulumi.Input[str]]] drop: Removed capabilities
"""
if add is not None:
pulumi.set(__self__, "add", add)
if drop is not None:
pulumi.set(__self__, "drop", drop)
@property
@pulumi.getter
def add(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Added capabilities
"""
return pulumi.get(self, "add")
@add.setter
def add(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "add", value)
@property
@pulumi.getter
def drop(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Removed capabilities
"""
return pulumi.get(self, "drop")
@drop.setter
def drop(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "drop", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersSecurityContextSeLinuxOptionsArgs:
def __init__(__self__, *,
level: Optional[pulumi.Input[str]] = None,
role: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
user: Optional[pulumi.Input[str]] = None):
"""
The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param pulumi.Input[str] level: Level is the SELinux level label that applies to the container.
:param pulumi.Input[str] role: Role is a SELinux role label that applies to the container.
:param pulumi.Input[str] type: Type is a SELinux type label that applies to the container.
:param pulumi.Input[str] user: User is a SELinux user label that applies to the container.
"""
if level is not None:
pulumi.set(__self__, "level", level)
if role is not None:
pulumi.set(__self__, "role", role)
if type is not None:
pulumi.set(__self__, "type", type)
if user is not None:
pulumi.set(__self__, "user", user)
@property
@pulumi.getter
def level(self) -> Optional[pulumi.Input[str]]:
"""
Level is the SELinux level label that applies to the container.
"""
return pulumi.get(self, "level")
@level.setter
def level(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "level", value)
@property
@pulumi.getter
def role(self) -> Optional[pulumi.Input[str]]:
"""
Role is a SELinux role label that applies to the container.
"""
return pulumi.get(self, "role")
@role.setter
def role(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
Type is a SELinux type label that applies to the container.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def user(self) -> Optional[pulumi.Input[str]]:
"""
User is a SELinux user label that applies to the container.
"""
return pulumi.get(self, "user")
@user.setter
def user(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersSecurityContextWindowsOptionsArgs:
def __init__(__self__, *,
gmsa_credential_spec: Optional[pulumi.Input[str]] = None,
gmsa_credential_spec_name: Optional[pulumi.Input[str]] = None,
run_as_user_name: Optional[pulumi.Input[str]] = None):
"""
The Windows-specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param pulumi.Input[str] gmsa_credential_spec: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.
:param pulumi.Input[str] gmsa_credential_spec_name: GMSACredentialSpecName is the name of the GMSA credential spec to use.
:param pulumi.Input[str] run_as_user_name: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
if gmsa_credential_spec is not None:
pulumi.set(__self__, "gmsa_credential_spec", gmsa_credential_spec)
if gmsa_credential_spec_name is not None:
pulumi.set(__self__, "gmsa_credential_spec_name", gmsa_credential_spec_name)
if run_as_user_name is not None:
pulumi.set(__self__, "run_as_user_name", run_as_user_name)
@property
@pulumi.getter(name="gmsaCredentialSpec")
def gmsa_credential_spec(self) -> Optional[pulumi.Input[str]]:
"""
GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.
"""
return pulumi.get(self, "gmsa_credential_spec")
@gmsa_credential_spec.setter
def gmsa_credential_spec(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "gmsa_credential_spec", value)
@property
@pulumi.getter(name="gmsaCredentialSpecName")
def gmsa_credential_spec_name(self) -> Optional[pulumi.Input[str]]:
"""
GMSACredentialSpecName is the name of the GMSA credential spec to use.
"""
return pulumi.get(self, "gmsa_credential_spec_name")
@gmsa_credential_spec_name.setter
def gmsa_credential_spec_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "gmsa_credential_spec_name", value)
@property
@pulumi.getter(name="runAsUserName")
def run_as_user_name(self) -> Optional[pulumi.Input[str]]:
"""
The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "run_as_user_name")
@run_as_user_name.setter
def run_as_user_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "run_as_user_name", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeArgs:
def __init__(__self__, *,
exec_: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeExecArgs']] = None,
failure_threshold: Optional[pulumi.Input[int]] = None,
http_get: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeHttpGetArgs']] = None,
initial_delay_seconds: Optional[pulumi.Input[int]] = None,
period_seconds: Optional[pulumi.Input[int]] = None,
success_threshold: Optional[pulumi.Input[int]] = None,
tcp_socket: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeTcpSocketArgs']] = None,
timeout_seconds: Optional[pulumi.Input[int]] = None):
"""
Probes are not allowed for ephemeral containers.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeExecArgs'] exec_: One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[int] failure_threshold: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeHttpGetArgs'] http_get: HTTPGet specifies the http request to perform.
:param pulumi.Input[int] initial_delay_seconds: Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input[int] period_seconds: How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
:param pulumi.Input[int] success_threshold: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeTcpSocketArgs'] tcp_socket: TCPSocket specifies an action involving a TCP port. TCP hooks are not yet supported. TODO: implement a realistic TCP lifecycle hook
:param pulumi.Input[int] timeout_seconds: Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
if exec_ is not None:
pulumi.set(__self__, "exec_", exec_)
if failure_threshold is not None:
pulumi.set(__self__, "failure_threshold", failure_threshold)
if http_get is not None:
pulumi.set(__self__, "http_get", http_get)
if initial_delay_seconds is not None:
pulumi.set(__self__, "initial_delay_seconds", initial_delay_seconds)
if period_seconds is not None:
pulumi.set(__self__, "period_seconds", period_seconds)
if success_threshold is not None:
pulumi.set(__self__, "success_threshold", success_threshold)
if tcp_socket is not None:
pulumi.set(__self__, "tcp_socket", tcp_socket)
if timeout_seconds is not None:
pulumi.set(__self__, "timeout_seconds", timeout_seconds)
@property
@pulumi.getter(name="exec")
def exec_(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeExecArgs']]:
"""
One and only one of the following should be specified. Exec specifies the action to take.
"""
return pulumi.get(self, "exec_")
@exec_.setter
def exec_(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeExecArgs']]):
pulumi.set(self, "exec_", value)
@property
@pulumi.getter(name="failureThreshold")
def failure_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
"""
return pulumi.get(self, "failure_threshold")
@failure_threshold.setter
def failure_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "failure_threshold", value)
@property
@pulumi.getter(name="httpGet")
def http_get(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeHttpGetArgs']]:
"""
HTTPGet specifies the http request to perform.
"""
return pulumi.get(self, "http_get")
@http_get.setter
def http_get(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeHttpGetArgs']]):
pulumi.set(self, "http_get", value)
@property
@pulumi.getter(name="initialDelaySeconds")
def initial_delay_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "initial_delay_seconds")
@initial_delay_seconds.setter
def initial_delay_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "initial_delay_seconds", value)
@property
@pulumi.getter(name="periodSeconds")
def period_seconds(self) -> Optional[pulumi.Input[int]]:
"""
How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
"""
return pulumi.get(self, "period_seconds")
@period_seconds.setter
def period_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "period_seconds", value)
@property
@pulumi.getter(name="successThreshold")
def success_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
"""
return pulumi.get(self, "success_threshold")
@success_threshold.setter
def success_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "success_threshold", value)
@property
@pulumi.getter(name="tcpSocket")
def tcp_socket(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeTcpSocketArgs']]:
"""
TCPSocket specifies an action involving a TCP port. TCP hooks are not yet supported. TODO: implement a realistic TCP lifecycle hook
"""
return pulumi.get(self, "tcp_socket")
@tcp_socket.setter
def tcp_socket(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeTcpSocketArgs']]):
pulumi.set(self, "tcp_socket", value)
@property
@pulumi.getter(name="timeoutSeconds")
def timeout_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "timeout_seconds")
@timeout_seconds.setter
def timeout_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "timeout_seconds", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeExecArgs:
def __init__(__self__, *,
command: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Command is the command line to execute inside the container; the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd; it is not run inside a shell, so traditional shell instructions ('|', etc.) won't work. To use a shell, you need to explicitly call out to that shell. An exit status of 0 is treated as live/healthy and non-zero as unhealthy.
"""
if command is not None:
pulumi.set(__self__, "command", command)
@property
@pulumi.getter
def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Command is the command line to execute inside the container; the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd; it is not run inside a shell, so traditional shell instructions ('|', etc.) won't work. To use a shell, you need to explicitly call out to that shell. An exit status of 0 is treated as live/healthy and non-zero as unhealthy.
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "command", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeHttpGetArgs:
def __init__(__self__, *,
port: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeHttpGetPortArgs'],
host: Optional[pulumi.Input[str]] = None,
http_headers: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeHttpGetHttpHeadersArgs']]]] = None,
path: Optional[pulumi.Input[str]] = None,
scheme: Optional[pulumi.Input[str]] = None):
"""
HTTPGet specifies the http request to perform.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeHttpGetPortArgs'] port: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeHttpGetHttpHeadersArgs']]] http_headers: Custom headers to set in the request. HTTP allows repeated headers.
:param pulumi.Input[str] path: Path to access on the HTTP server.
:param pulumi.Input[str] scheme: Scheme to use for connecting to the host. Defaults to HTTP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
if http_headers is not None:
pulumi.set(__self__, "http_headers", http_headers)
if path is not None:
pulumi.set(__self__, "path", path)
if scheme is not None:
pulumi.set(__self__, "scheme", scheme)
@property
@pulumi.getter
def port(self) -> pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeHttpGetPortArgs']:
"""
Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeHttpGetPortArgs']):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@property
@pulumi.getter(name="httpHeaders")
def http_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeHttpGetHttpHeadersArgs']]]]:
"""
Custom headers to set in the request. HTTP allows repeated headers.
"""
return pulumi.get(self, "http_headers")
@http_headers.setter
def http_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeHttpGetHttpHeadersArgs']]]]):
pulumi.set(self, "http_headers", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
Path to access on the HTTP server.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def scheme(self) -> Optional[pulumi.Input[str]]:
"""
Scheme to use for connecting to the host. Defaults to HTTP.
"""
return pulumi.get(self, "scheme")
@scheme.setter
def scheme(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scheme", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeHttpGetHttpHeadersArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
"""
HTTPHeader describes a custom header to be used in HTTP probes
:param pulumi.Input[str] name: The header field name
:param pulumi.Input[str] value: The header field value
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The header field name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The header field value
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeHttpGetPortArgs:
def __init__(__self__):
    # The underlying schema value is an int-or-string (a port number in the
    # range 1 to 65535 or an IANA_SVC_NAME), so the generator emits no fields
    # for this type.
    pass
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeTcpSocketArgs:
def __init__(__self__, *,
port: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeTcpSocketPortArgs'],
host: Optional[pulumi.Input[str]] = None):
"""
TCPSocket specifies an action involving a TCP port. TCP hooks are not yet supported. TODO: implement a realistic TCP lifecycle hook
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeTcpSocketPortArgs'] port: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Optional: Host name to connect to, defaults to the pod IP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
@property
@pulumi.getter
def port(self) -> pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeTcpSocketPortArgs']:
"""
Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeTcpSocketPortArgs']):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Optional: Host name to connect to, defaults to the pod IP.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersStartupProbeTcpSocketPortArgs:
def __init__(__self__):
    # The underlying schema value is an int-or-string (a port number in the
    # range 1 to 65535 or an IANA_SVC_NAME), so the generator emits no fields
    # for this type.
    pass
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersVolumeDevicesArgs:
def __init__(__self__, *,
device_path: pulumi.Input[str],
name: pulumi.Input[str]):
"""
volumeDevice describes a mapping of a raw block device within a container.
:param pulumi.Input[str] device_path: devicePath is the path inside of the container that the device will be mapped to.
:param pulumi.Input[str] name: name must match the name of a persistentVolumeClaim in the pod
"""
pulumi.set(__self__, "device_path", device_path)
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="devicePath")
def device_path(self) -> pulumi.Input[str]:
"""
devicePath is the path inside of the container that the device will be mapped to.
"""
return pulumi.get(self, "device_path")
@device_path.setter
def device_path(self, value: pulumi.Input[str]):
pulumi.set(self, "device_path", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
name must match the name of a persistentVolumeClaim in the pod
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersVolumeMountsArgs:
def __init__(__self__, *,
mount_path: pulumi.Input[str],
name: pulumi.Input[str],
mount_propagation: Optional[pulumi.Input[str]] = None,
read_only: Optional[pulumi.Input[bool]] = None,
sub_path: Optional[pulumi.Input[str]] = None,
sub_path_expr: Optional[pulumi.Input[str]] = None):
"""
VolumeMount describes a mounting of a Volume within a container.
:param pulumi.Input[str] mount_path: Path within the container at which the volume should be mounted. Must not contain ':'.
:param pulumi.Input[str] name: This must match the Name of a Volume.
:param pulumi.Input[str] mount_propagation: mountPropagation determines how mounts are propagated from the host to the container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
:param pulumi.Input[bool] read_only: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
:param pulumi.Input[str] sub_path: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
:param pulumi.Input[str] sub_path_expr: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive.
"""
pulumi.set(__self__, "mount_path", mount_path)
pulumi.set(__self__, "name", name)
if mount_propagation is not None:
pulumi.set(__self__, "mount_propagation", mount_propagation)
if read_only is not None:
pulumi.set(__self__, "read_only", read_only)
if sub_path is not None:
pulumi.set(__self__, "sub_path", sub_path)
if sub_path_expr is not None:
pulumi.set(__self__, "sub_path_expr", sub_path_expr)
@property
@pulumi.getter(name="mountPath")
def mount_path(self) -> pulumi.Input[str]:
"""
Path within the container at which the volume should be mounted. Must not contain ':'.
"""
return pulumi.get(self, "mount_path")
@mount_path.setter
def mount_path(self, value: pulumi.Input[str]):
pulumi.set(self, "mount_path", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
This must match the Name of a Volume.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="mountPropagation")
def mount_propagation(self) -> Optional[pulumi.Input[str]]:
"""
mountPropagation determines how mounts are propagated from the host to the container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
"""
return pulumi.get(self, "mount_propagation")
@mount_propagation.setter
def mount_propagation(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mount_propagation", value)
@property
@pulumi.getter(name="readOnly")
def read_only(self) -> Optional[pulumi.Input[bool]]:
"""
Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
"""
return pulumi.get(self, "read_only")
@read_only.setter
def read_only(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "read_only", value)
@property
@pulumi.getter(name="subPath")
def sub_path(self) -> Optional[pulumi.Input[str]]:
"""
Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
"""
return pulumi.get(self, "sub_path")
@sub_path.setter
def sub_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sub_path", value)
@property
@pulumi.getter(name="subPathExpr")
def sub_path_expr(self) -> Optional[pulumi.Input[str]]:
"""
Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive.
"""
return pulumi.get(self, "sub_path_expr")
@sub_path_expr.setter
def sub_path_expr(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sub_path_expr", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecHostAliasesArgs:
def __init__(__self__, *,
hostnames: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
ip: Optional[pulumi.Input[str]] = None):
"""
HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.
:param pulumi.Input[Sequence[pulumi.Input[str]]] hostnames: Hostnames for the above IP address.
:param pulumi.Input[str] ip: IP address of the host file entry.
"""
if hostnames is not None:
pulumi.set(__self__, "hostnames", hostnames)
if ip is not None:
pulumi.set(__self__, "ip", ip)
@property
@pulumi.getter
def hostnames(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Hostnames for the above IP address.
"""
return pulumi.get(self, "hostnames")
@hostnames.setter
def hostnames(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "hostnames", value)
@property
@pulumi.getter
def ip(self) -> Optional[pulumi.Input[str]]:
"""
IP address of the host file entry.
"""
return pulumi.get(self, "ip")
@ip.setter
def ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ip", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecImagePullSecretsArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None):
"""
LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
args: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
command: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
env: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvArgs']]]] = None,
env_from: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvFromArgs']]]] = None,
image: Optional[pulumi.Input[str]] = None,
image_pull_policy: Optional[pulumi.Input[str]] = None,
lifecycle: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecycleArgs']] = None,
liveness_probe: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLivenessProbeArgs']] = None,
ports: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersPortsArgs']]]] = None,
readiness_probe: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersReadinessProbeArgs']] = None,
resources: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersResourcesArgs']] = None,
security_context: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersSecurityContextArgs']] = None,
startup_probe: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeArgs']] = None,
stdin: Optional[pulumi.Input[bool]] = None,
stdin_once: Optional[pulumi.Input[bool]] = None,
termination_message_path: Optional[pulumi.Input[str]] = None,
termination_message_policy: Optional[pulumi.Input[str]] = None,
tty: Optional[pulumi.Input[bool]] = None,
volume_devices: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersVolumeDevicesArgs']]]] = None,
volume_mounts: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersVolumeMountsArgs']]]] = None,
working_dir: Optional[pulumi.Input[str]] = None):
"""
A single application container that you want to run within a pod.
:param pulumi.Input[str] name: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
:param pulumi.Input[Sequence[pulumi.Input[str]]] args: Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvArgs']]] env: List of environment variables to set in the container. Cannot be updated.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvFromArgs']]] env_from: List of sources to populate environment variables in the container. Each key defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.
:param pulumi.Input[str] image: Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.
:param pulumi.Input[str] image_pull_policy: Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecycleArgs'] lifecycle: Actions that the management system should take in response to container lifecycle events. Cannot be updated.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLivenessProbeArgs'] liveness_probe: Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersPortsArgs']]] ports: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersReadinessProbeArgs'] readiness_probe: Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersResourcesArgs'] resources: Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersSecurityContextArgs'] security_context: Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeArgs'] startup_probe: StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. This is a beta feature enabled by the StartupProbe feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input[bool] stdin: Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.
:param pulumi.Input[bool] stdin_once: Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true, the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false.
:param pulumi.Input[str] termination_message_path: Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. The message written is intended to be a brief final status, such as an assertion failure message. It will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.
:param pulumi.Input[str] termination_message_policy: Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.
:param pulumi.Input[bool] tty: Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersVolumeDevicesArgs']]] volume_devices: volumeDevices is the list of block devices to be used by the container.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersVolumeMountsArgs']]] volume_mounts: Pod volumes to mount into the container's filesystem. Cannot be updated.
:param pulumi.Input[str] working_dir: Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.
"""
pulumi.set(__self__, "name", name)
if args is not None:
pulumi.set(__self__, "args", args)
if command is not None:
pulumi.set(__self__, "command", command)
if env is not None:
pulumi.set(__self__, "env", env)
if env_from is not None:
pulumi.set(__self__, "env_from", env_from)
if image is not None:
pulumi.set(__self__, "image", image)
if image_pull_policy is not None:
pulumi.set(__self__, "image_pull_policy", image_pull_policy)
if lifecycle is not None:
pulumi.set(__self__, "lifecycle", lifecycle)
if liveness_probe is not None:
pulumi.set(__self__, "liveness_probe", liveness_probe)
if ports is not None:
pulumi.set(__self__, "ports", ports)
if readiness_probe is not None:
pulumi.set(__self__, "readiness_probe", readiness_probe)
if resources is not None:
pulumi.set(__self__, "resources", resources)
if security_context is not None:
pulumi.set(__self__, "security_context", security_context)
if startup_probe is not None:
pulumi.set(__self__, "startup_probe", startup_probe)
if stdin is not None:
pulumi.set(__self__, "stdin", stdin)
if stdin_once is not None:
pulumi.set(__self__, "stdin_once", stdin_once)
if termination_message_path is not None:
pulumi.set(__self__, "termination_message_path", termination_message_path)
if termination_message_policy is not None:
pulumi.set(__self__, "termination_message_policy", termination_message_policy)
if tty is not None:
pulumi.set(__self__, "tty", tty)
if volume_devices is not None:
pulumi.set(__self__, "volume_devices", volume_devices)
if volume_mounts is not None:
pulumi.set(__self__, "volume_mounts", volume_mounts)
if working_dir is not None:
pulumi.set(__self__, "working_dir", working_dir)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def args(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
"""
return pulumi.get(self, "args")
@args.setter
def args(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "args", value)
@property
@pulumi.getter
def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "command", value)
@property
@pulumi.getter
def env(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvArgs']]]]:
"""
List of environment variables to set in the container. Cannot be updated.
"""
return pulumi.get(self, "env")
@env.setter
def env(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvArgs']]]]):
pulumi.set(self, "env", value)
@property
@pulumi.getter(name="envFrom")
def env_from(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvFromArgs']]]]:
"""
List of sources to populate environment variables in the container. Each key defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.
"""
return pulumi.get(self, "env_from")
@env_from.setter
def env_from(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvFromArgs']]]]):
pulumi.set(self, "env_from", value)
@property
@pulumi.getter
def image(self) -> Optional[pulumi.Input[str]]:
"""
Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.
"""
return pulumi.get(self, "image")
@image.setter
def image(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image", value)
@property
@pulumi.getter(name="imagePullPolicy")
def image_pull_policy(self) -> Optional[pulumi.Input[str]]:
"""
Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
"""
return pulumi.get(self, "image_pull_policy")
@image_pull_policy.setter
def image_pull_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image_pull_policy", value)
@property
@pulumi.getter
def lifecycle(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecycleArgs']]:
"""
Actions that the management system should take in response to container lifecycle events. Cannot be updated.
"""
return pulumi.get(self, "lifecycle")
@lifecycle.setter
def lifecycle(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecycleArgs']]):
pulumi.set(self, "lifecycle", value)
@property
@pulumi.getter(name="livenessProbe")
def liveness_probe(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLivenessProbeArgs']]:
"""
Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "liveness_probe")
@liveness_probe.setter
def liveness_probe(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLivenessProbeArgs']]):
pulumi.set(self, "liveness_probe", value)
@property
@pulumi.getter
def ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersPortsArgs']]]]:
"""
List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
"""
return pulumi.get(self, "ports")
@ports.setter
def ports(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersPortsArgs']]]]):
pulumi.set(self, "ports", value)
@property
@pulumi.getter(name="readinessProbe")
def readiness_probe(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersReadinessProbeArgs']]:
"""
Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "readiness_probe")
@readiness_probe.setter
def readiness_probe(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersReadinessProbeArgs']]):
pulumi.set(self, "readiness_probe", value)
@property
@pulumi.getter
def resources(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersResourcesArgs']]:
"""
Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
return pulumi.get(self, "resources")
@resources.setter
def resources(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersResourcesArgs']]):
pulumi.set(self, "resources", value)
@property
@pulumi.getter(name="securityContext")
def security_context(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersSecurityContextArgs']]:
"""
Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
"""
return pulumi.get(self, "security_context")
@security_context.setter
def security_context(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersSecurityContextArgs']]):
pulumi.set(self, "security_context", value)
@property
@pulumi.getter(name="startupProbe")
def startup_probe(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeArgs']]:
"""
StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. This is a beta feature enabled by the StartupProbe feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "startup_probe")
@startup_probe.setter
def startup_probe(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeArgs']]):
pulumi.set(self, "startup_probe", value)
@property
@pulumi.getter
def stdin(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.
"""
return pulumi.get(self, "stdin")
@stdin.setter
def stdin(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "stdin", value)
@property
@pulumi.getter(name="stdinOnce")
def stdin_once(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true, the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false.
"""
return pulumi.get(self, "stdin_once")
@stdin_once.setter
def stdin_once(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "stdin_once", value)
@property
@pulumi.getter(name="terminationMessagePath")
def termination_message_path(self) -> Optional[pulumi.Input[str]]:
"""
Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. The message written is intended to be a brief final status, such as an assertion failure message. It will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.
"""
return pulumi.get(self, "termination_message_path")
@termination_message_path.setter
def termination_message_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "termination_message_path", value)
@property
@pulumi.getter(name="terminationMessagePolicy")
def termination_message_policy(self) -> Optional[pulumi.Input[str]]:
"""
Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.
"""
return pulumi.get(self, "termination_message_policy")
@termination_message_policy.setter
def termination_message_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "termination_message_policy", value)
@property
@pulumi.getter
def tty(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.
"""
return pulumi.get(self, "tty")
@tty.setter
def tty(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "tty", value)
@property
@pulumi.getter(name="volumeDevices")
def volume_devices(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersVolumeDevicesArgs']]]]:
"""
volumeDevices is the list of block devices to be used by the container.
"""
return pulumi.get(self, "volume_devices")
@volume_devices.setter
def volume_devices(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersVolumeDevicesArgs']]]]):
pulumi.set(self, "volume_devices", value)
@property
@pulumi.getter(name="volumeMounts")
def volume_mounts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersVolumeMountsArgs']]]]:
"""
Pod volumes to mount into the container's filesystem. Cannot be updated.
"""
return pulumi.get(self, "volume_mounts")
@volume_mounts.setter
def volume_mounts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersVolumeMountsArgs']]]]):
pulumi.set(self, "volume_mounts", value)
@property
@pulumi.getter(name="workingDir")
def working_dir(self) -> Optional[pulumi.Input[str]]:
"""
Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.
"""
return pulumi.get(self, "working_dir")
@working_dir.setter
def working_dir(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "working_dir", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: Optional[pulumi.Input[str]] = None,
value_from: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromArgs']] = None):
"""
EnvVar represents an environment variable present in a Container.
:param pulumi.Input[str] name: Name of the environment variable. Must be a C_IDENTIFIER.
:param pulumi.Input[str] value: Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromArgs'] value_from: Source for the environment variable's value. Cannot be used if value is not empty.
"""
pulumi.set(__self__, "name", name)
if value is not None:
pulumi.set(__self__, "value", value)
if value_from is not None:
pulumi.set(__self__, "value_from", value_from)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Name of the environment variable. Must be a C_IDENTIFIER.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
@property
@pulumi.getter(name="valueFrom")
def value_from(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromArgs']]:
"""
Source for the environment variable's value. Cannot be used if value is not empty.
"""
return pulumi.get(self, "value_from")
@value_from.setter
def value_from(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromArgs']]):
pulumi.set(self, "value_from", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvFromArgs:
def __init__(__self__, *,
config_map_ref: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvFromConfigMapRefArgs']] = None,
prefix: Optional[pulumi.Input[str]] = None,
secret_ref: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvFromSecretRefArgs']] = None):
"""
EnvFromSource represents the source of a set of ConfigMaps
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvFromConfigMapRefArgs'] config_map_ref: The ConfigMap to select from
:param pulumi.Input[str] prefix: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvFromSecretRefArgs'] secret_ref: The Secret to select from
"""
if config_map_ref is not None:
pulumi.set(__self__, "config_map_ref", config_map_ref)
if prefix is not None:
pulumi.set(__self__, "prefix", prefix)
if secret_ref is not None:
pulumi.set(__self__, "secret_ref", secret_ref)
@property
@pulumi.getter(name="configMapRef")
def config_map_ref(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvFromConfigMapRefArgs']]:
"""
The ConfigMap to select from
"""
return pulumi.get(self, "config_map_ref")
@config_map_ref.setter
def config_map_ref(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvFromConfigMapRefArgs']]):
pulumi.set(self, "config_map_ref", value)
@property
@pulumi.getter
def prefix(self) -> Optional[pulumi.Input[str]]:
"""
An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
"""
return pulumi.get(self, "prefix")
@prefix.setter
def prefix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "prefix", value)
@property
@pulumi.getter(name="secretRef")
def secret_ref(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvFromSecretRefArgs']]:
"""
The Secret to select from
"""
return pulumi.get(self, "secret_ref")
@secret_ref.setter
def secret_ref(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvFromSecretRefArgs']]):
pulumi.set(self, "secret_ref", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvFromConfigMapRefArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
optional: Optional[pulumi.Input[bool]] = None):
"""
The ConfigMap to select from
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
:param pulumi.Input[bool] optional: Specify whether the ConfigMap must be defined
"""
if name is not None:
pulumi.set(__self__, "name", name)
if optional is not None:
pulumi.set(__self__, "optional", optional)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def optional(self) -> Optional[pulumi.Input[bool]]:
"""
Specify whether the ConfigMap must be defined
"""
return pulumi.get(self, "optional")
@optional.setter
def optional(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "optional", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvFromSecretRefArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
optional: Optional[pulumi.Input[bool]] = None):
"""
The Secret to select from
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
:param pulumi.Input[bool] optional: Specify whether the Secret must be defined
"""
if name is not None:
pulumi.set(__self__, "name", name)
if optional is not None:
pulumi.set(__self__, "optional", optional)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def optional(self) -> Optional[pulumi.Input[bool]]:
"""
Specify whether the Secret must be defined
"""
return pulumi.get(self, "optional")
@optional.setter
def optional(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "optional", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromArgs:
def __init__(__self__, *,
config_map_key_ref: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromConfigMapKeyRefArgs']] = None,
field_ref: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromFieldRefArgs']] = None,
resource_field_ref: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromResourceFieldRefArgs']] = None,
secret_key_ref: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromSecretKeyRefArgs']] = None):
"""
Source for the environment variable's value. Cannot be used if value is not empty.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromConfigMapKeyRefArgs'] config_map_key_ref: Selects a key of a ConfigMap.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromFieldRefArgs'] field_ref: Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
        :param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromResourceFieldRefArgs'] resource_field_ref: Selects a resource of the container: only resource limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromSecretKeyRefArgs'] secret_key_ref: Selects a key of a secret in the pod's namespace
"""
if config_map_key_ref is not None:
pulumi.set(__self__, "config_map_key_ref", config_map_key_ref)
if field_ref is not None:
pulumi.set(__self__, "field_ref", field_ref)
if resource_field_ref is not None:
pulumi.set(__self__, "resource_field_ref", resource_field_ref)
if secret_key_ref is not None:
pulumi.set(__self__, "secret_key_ref", secret_key_ref)
@property
@pulumi.getter(name="configMapKeyRef")
def config_map_key_ref(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromConfigMapKeyRefArgs']]:
"""
Selects a key of a ConfigMap.
"""
return pulumi.get(self, "config_map_key_ref")
@config_map_key_ref.setter
def config_map_key_ref(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromConfigMapKeyRefArgs']]):
pulumi.set(self, "config_map_key_ref", value)
@property
@pulumi.getter(name="fieldRef")
def field_ref(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromFieldRefArgs']]:
"""
Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
"""
return pulumi.get(self, "field_ref")
@field_ref.setter
def field_ref(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromFieldRefArgs']]):
pulumi.set(self, "field_ref", value)
@property
@pulumi.getter(name="resourceFieldRef")
def resource_field_ref(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromResourceFieldRefArgs']]:
"""
        Selects a resource of the container: only resource limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
"""
return pulumi.get(self, "resource_field_ref")
@resource_field_ref.setter
def resource_field_ref(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromResourceFieldRefArgs']]):
pulumi.set(self, "resource_field_ref", value)
@property
@pulumi.getter(name="secretKeyRef")
def secret_key_ref(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromSecretKeyRefArgs']]:
"""
Selects a key of a secret in the pod's namespace
"""
return pulumi.get(self, "secret_key_ref")
@secret_key_ref.setter
def secret_key_ref(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromSecretKeyRefArgs']]):
pulumi.set(self, "secret_key_ref", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromConfigMapKeyRefArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
name: Optional[pulumi.Input[str]] = None,
optional: Optional[pulumi.Input[bool]] = None):
"""
Selects a key of a ConfigMap.
:param pulumi.Input[str] key: The key to select.
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
:param pulumi.Input[bool] optional: Specify whether the ConfigMap or its key must be defined
"""
pulumi.set(__self__, "key", key)
if name is not None:
pulumi.set(__self__, "name", name)
if optional is not None:
pulumi.set(__self__, "optional", optional)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The key to select.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def optional(self) -> Optional[pulumi.Input[bool]]:
"""
Specify whether the ConfigMap or its key must be defined
"""
return pulumi.get(self, "optional")
@optional.setter
def optional(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "optional", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromFieldRefArgs:
def __init__(__self__, *,
field_path: pulumi.Input[str],
api_version: Optional[pulumi.Input[str]] = None):
"""
Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
:param pulumi.Input[str] field_path: Path of the field to select in the specified API version.
:param pulumi.Input[str] api_version: Version of the schema the FieldPath is written in terms of, defaults to "v1".
"""
pulumi.set(__self__, "field_path", field_path)
if api_version is not None:
pulumi.set(__self__, "api_version", api_version)
@property
@pulumi.getter(name="fieldPath")
def field_path(self) -> pulumi.Input[str]:
"""
Path of the field to select in the specified API version.
"""
return pulumi.get(self, "field_path")
@field_path.setter
def field_path(self, value: pulumi.Input[str]):
pulumi.set(self, "field_path", value)
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> Optional[pulumi.Input[str]]:
"""
Version of the schema the FieldPath is written in terms of, defaults to "v1".
"""
return pulumi.get(self, "api_version")
@api_version.setter
def api_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_version", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromResourceFieldRefArgs:
def __init__(__self__, *,
resource: pulumi.Input[str],
container_name: Optional[pulumi.Input[str]] = None,
divisor: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromResourceFieldRefDivisorArgs']] = None):
"""
        Selects a resource of the container: only resource limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
:param pulumi.Input[str] resource: Required: resource to select
:param pulumi.Input[str] container_name: Container name: required for volumes, optional for env vars
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromResourceFieldRefDivisorArgs'] divisor: Specifies the output format of the exposed resources, defaults to "1"
"""
pulumi.set(__self__, "resource", resource)
if container_name is not None:
pulumi.set(__self__, "container_name", container_name)
if divisor is not None:
pulumi.set(__self__, "divisor", divisor)
@property
@pulumi.getter
def resource(self) -> pulumi.Input[str]:
"""
Required: resource to select
"""
return pulumi.get(self, "resource")
@resource.setter
def resource(self, value: pulumi.Input[str]):
pulumi.set(self, "resource", value)
@property
@pulumi.getter(name="containerName")
def container_name(self) -> Optional[pulumi.Input[str]]:
"""
Container name: required for volumes, optional for env vars
"""
return pulumi.get(self, "container_name")
@container_name.setter
def container_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "container_name", value)
@property
@pulumi.getter
def divisor(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromResourceFieldRefDivisorArgs']]:
"""
Specifies the output format of the exposed resources, defaults to "1"
"""
return pulumi.get(self, "divisor")
@divisor.setter
def divisor(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromResourceFieldRefDivisorArgs']]):
pulumi.set(self, "divisor", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromResourceFieldRefDivisorArgs:
    def __init__(__self__):
        """
        Specifies the output format of the exposed resources, defaults to "1". The divisor is a
        Kubernetes resource Quantity; the CRD schema exposes no structured fields for it, so this
        input type carries no arguments.
        """
        pass
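
# Usage sketch (illustrative): exposing the container's CPU limit as an environment variable
# source. "init-model" is a hypothetical container name; container_name is optional for env vars.
#
#   cpu_limit_source = SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromArgs(
#       resource_field_ref=SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromResourceFieldRefArgs(
#           resource="limits.cpu",
#           container_name="init-model",
#       ),
#   )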
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersEnvValueFromSecretKeyRefArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
name: Optional[pulumi.Input[str]] = None,
optional: Optional[pulumi.Input[bool]] = None):
"""
Selects a key of a secret in the pod's namespace
:param pulumi.Input[str] key: The key of the secret to select from. Must be a valid secret key.
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
:param pulumi.Input[bool] optional: Specify whether the Secret or its key must be defined
"""
pulumi.set(__self__, "key", key)
if name is not None:
pulumi.set(__self__, "name", name)
if optional is not None:
pulumi.set(__self__, "optional", optional)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The key of the secret to select from. Must be a valid secret key.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def optional(self) -> Optional[pulumi.Input[bool]]:
"""
Specify whether the Secret or its key must be defined
"""
return pulumi.get(self, "optional")
@optional.setter
def optional(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "optional", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecycleArgs:
def __init__(__self__, *,
post_start: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePostStartArgs']] = None,
pre_stop: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePreStopArgs']] = None):
"""
Actions that the management system should take in response to container lifecycle events. Cannot be updated.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePostStartArgs'] post_start: PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
        :param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePreStopArgs'] pre_stop: PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
"""
if post_start is not None:
pulumi.set(__self__, "post_start", post_start)
if pre_stop is not None:
pulumi.set(__self__, "pre_stop", pre_stop)
@property
@pulumi.getter(name="postStart")
def post_start(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePostStartArgs']]:
"""
PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
"""
return pulumi.get(self, "post_start")
@post_start.setter
def post_start(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePostStartArgs']]):
pulumi.set(self, "post_start", value)
@property
@pulumi.getter(name="preStop")
def pre_stop(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePreStopArgs']]:
"""
        PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
"""
return pulumi.get(self, "pre_stop")
@pre_stop.setter
def pre_stop(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePreStopArgs']]):
pulumi.set(self, "pre_stop", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePostStartArgs:
def __init__(__self__, *,
exec_: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePostStartExecArgs']] = None,
http_get: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePostStartHttpGetArgs']] = None,
tcp_socket: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePostStartTcpSocketArgs']] = None):
"""
PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePostStartExecArgs'] exec_: One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePostStartHttpGetArgs'] http_get: HTTPGet specifies the http request to perform.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePostStartTcpSocketArgs'] tcp_socket: TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
"""
if exec_ is not None:
pulumi.set(__self__, "exec_", exec_)
if http_get is not None:
pulumi.set(__self__, "http_get", http_get)
if tcp_socket is not None:
pulumi.set(__self__, "tcp_socket", tcp_socket)
@property
@pulumi.getter(name="exec")
def exec_(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePostStartExecArgs']]:
"""
One and only one of the following should be specified. Exec specifies the action to take.
"""
return pulumi.get(self, "exec_")
@exec_.setter
def exec_(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePostStartExecArgs']]):
pulumi.set(self, "exec_", value)
@property
@pulumi.getter(name="httpGet")
def http_get(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePostStartHttpGetArgs']]:
"""
HTTPGet specifies the http request to perform.
"""
return pulumi.get(self, "http_get")
@http_get.setter
def http_get(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePostStartHttpGetArgs']]):
pulumi.set(self, "http_get", value)
@property
@pulumi.getter(name="tcpSocket")
def tcp_socket(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePostStartTcpSocketArgs']]:
"""
TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
"""
return pulumi.get(self, "tcp_socket")
@tcp_socket.setter
def tcp_socket(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePostStartTcpSocketArgs']]):
pulumi.set(self, "tcp_socket", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePostStartExecArgs:
def __init__(__self__, *,
command: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
if command is not None:
pulumi.set(__self__, "command", command)
@property
@pulumi.getter
def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "command", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePostStartHttpGetArgs:
def __init__(__self__, *,
port: pulumi.Input[Union[int, str]],
host: Optional[pulumi.Input[str]] = None,
http_headers: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePostStartHttpGetHttpHeadersArgs']]]] = None,
path: Optional[pulumi.Input[str]] = None,
scheme: Optional[pulumi.Input[str]] = None):
"""
HTTPGet specifies the http request to perform.
:param pulumi.Input[Union[int, str]] port: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePostStartHttpGetHttpHeadersArgs']]] http_headers: Custom headers to set in the request. HTTP allows repeated headers.
:param pulumi.Input[str] path: Path to access on the HTTP server.
:param pulumi.Input[str] scheme: Scheme to use for connecting to the host. Defaults to HTTP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
if http_headers is not None:
pulumi.set(__self__, "http_headers", http_headers)
if path is not None:
pulumi.set(__self__, "path", path)
if scheme is not None:
pulumi.set(__self__, "scheme", scheme)
@property
@pulumi.getter
def port(self) -> pulumi.Input[Union[int, str]]:
"""
Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input[Union[int, str]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@property
@pulumi.getter(name="httpHeaders")
def http_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePostStartHttpGetHttpHeadersArgs']]]]:
"""
Custom headers to set in the request. HTTP allows repeated headers.
"""
return pulumi.get(self, "http_headers")
@http_headers.setter
def http_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePostStartHttpGetHttpHeadersArgs']]]]):
pulumi.set(self, "http_headers", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
Path to access on the HTTP server.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def scheme(self) -> Optional[pulumi.Input[str]]:
"""
Scheme to use for connecting to the host. Defaults to HTTP.
"""
return pulumi.get(self, "scheme")
@scheme.setter
def scheme(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scheme", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePostStartHttpGetHttpHeadersArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
"""
HTTPHeader describes a custom header to be used in HTTP probes
:param pulumi.Input[str] name: The header field name
:param pulumi.Input[str] value: The header field value
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The header field name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The header field value
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePostStartTcpSocketArgs:
def __init__(__self__, *,
port: pulumi.Input[Union[int, str]],
host: Optional[pulumi.Input[str]] = None):
"""
TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
:param pulumi.Input[Union[int, str]] port: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Optional: Host name to connect to, defaults to the pod IP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
@property
@pulumi.getter
def port(self) -> pulumi.Input[Union[int, str]]:
"""
Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input[Union[int, str]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Optional: Host name to connect to, defaults to the pod IP.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePreStopArgs:
def __init__(__self__, *,
exec_: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePreStopExecArgs']] = None,
http_get: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePreStopHttpGetArgs']] = None,
tcp_socket: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePreStopTcpSocketArgs']] = None):
"""
        PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePreStopExecArgs'] exec_: One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePreStopHttpGetArgs'] http_get: HTTPGet specifies the http request to perform.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePreStopTcpSocketArgs'] tcp_socket: TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
"""
if exec_ is not None:
pulumi.set(__self__, "exec_", exec_)
if http_get is not None:
pulumi.set(__self__, "http_get", http_get)
if tcp_socket is not None:
pulumi.set(__self__, "tcp_socket", tcp_socket)
@property
@pulumi.getter(name="exec")
def exec_(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePreStopExecArgs']]:
"""
One and only one of the following should be specified. Exec specifies the action to take.
"""
return pulumi.get(self, "exec_")
@exec_.setter
def exec_(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePreStopExecArgs']]):
pulumi.set(self, "exec_", value)
@property
@pulumi.getter(name="httpGet")
def http_get(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePreStopHttpGetArgs']]:
"""
HTTPGet specifies the http request to perform.
"""
return pulumi.get(self, "http_get")
@http_get.setter
def http_get(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePreStopHttpGetArgs']]):
pulumi.set(self, "http_get", value)
@property
@pulumi.getter(name="tcpSocket")
def tcp_socket(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePreStopTcpSocketArgs']]:
"""
TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
"""
return pulumi.get(self, "tcp_socket")
@tcp_socket.setter
def tcp_socket(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePreStopTcpSocketArgs']]):
pulumi.set(self, "tcp_socket", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePreStopExecArgs:
def __init__(__self__, *,
command: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
if command is not None:
pulumi.set(__self__, "command", command)
@property
@pulumi.getter
def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "command", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePreStopHttpGetArgs:
def __init__(__self__, *,
port: pulumi.Input[Union[int, str]],
host: Optional[pulumi.Input[str]] = None,
http_headers: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePreStopHttpGetHttpHeadersArgs']]]] = None,
path: Optional[pulumi.Input[str]] = None,
scheme: Optional[pulumi.Input[str]] = None):
"""
HTTPGet specifies the http request to perform.
:param pulumi.Input[Union[int, str]] port: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePreStopHttpGetHttpHeadersArgs']]] http_headers: Custom headers to set in the request. HTTP allows repeated headers.
:param pulumi.Input[str] path: Path to access on the HTTP server.
:param pulumi.Input[str] scheme: Scheme to use for connecting to the host. Defaults to HTTP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
if http_headers is not None:
pulumi.set(__self__, "http_headers", http_headers)
if path is not None:
pulumi.set(__self__, "path", path)
if scheme is not None:
pulumi.set(__self__, "scheme", scheme)
@property
@pulumi.getter
def port(self) -> pulumi.Input[Union[int, str]]:
"""
Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input[Union[int, str]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@property
@pulumi.getter(name="httpHeaders")
def http_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePreStopHttpGetHttpHeadersArgs']]]]:
"""
Custom headers to set in the request. HTTP allows repeated headers.
"""
return pulumi.get(self, "http_headers")
@http_headers.setter
def http_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePreStopHttpGetHttpHeadersArgs']]]]):
pulumi.set(self, "http_headers", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
Path to access on the HTTP server.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def scheme(self) -> Optional[pulumi.Input[str]]:
"""
Scheme to use for connecting to the host. Defaults to HTTP.
"""
return pulumi.get(self, "scheme")
@scheme.setter
def scheme(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scheme", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePreStopHttpGetHttpHeadersArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
"""
HTTPHeader describes a custom header to be used in HTTP probes
:param pulumi.Input[str] name: The header field name
:param pulumi.Input[str] value: The header field value
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The header field name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The header field value
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLifecyclePreStopTcpSocketArgs:
def __init__(__self__, *,
port: pulumi.Input[Union[int, str]],
host: Optional[pulumi.Input[str]] = None):
"""
TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
:param pulumi.Input[Union[int, str]] port: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Optional: Host name to connect to, defaults to the pod IP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
@property
@pulumi.getter
def port(self) -> pulumi.Input[Union[int, str]]:
"""
Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input[Union[int, str]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Optional: Host name to connect to, defaults to the pod IP.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLivenessProbeArgs:
def __init__(__self__, *,
exec_: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLivenessProbeExecArgs']] = None,
failure_threshold: Optional[pulumi.Input[int]] = None,
http_get: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLivenessProbeHttpGetArgs']] = None,
initial_delay_seconds: Optional[pulumi.Input[int]] = None,
period_seconds: Optional[pulumi.Input[int]] = None,
success_threshold: Optional[pulumi.Input[int]] = None,
tcp_socket: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLivenessProbeTcpSocketArgs']] = None,
timeout_seconds: Optional[pulumi.Input[int]] = None):
"""
Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLivenessProbeExecArgs'] exec_: One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[int] failure_threshold: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLivenessProbeHttpGetArgs'] http_get: HTTPGet specifies the http request to perform.
:param pulumi.Input[int] initial_delay_seconds: Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
        :param pulumi.Input[int] period_seconds: How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
:param pulumi.Input[int] success_threshold: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLivenessProbeTcpSocketArgs'] tcp_socket: TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
:param pulumi.Input[int] timeout_seconds: Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
if exec_ is not None:
pulumi.set(__self__, "exec_", exec_)
if failure_threshold is not None:
pulumi.set(__self__, "failure_threshold", failure_threshold)
if http_get is not None:
pulumi.set(__self__, "http_get", http_get)
if initial_delay_seconds is not None:
pulumi.set(__self__, "initial_delay_seconds", initial_delay_seconds)
if period_seconds is not None:
pulumi.set(__self__, "period_seconds", period_seconds)
if success_threshold is not None:
pulumi.set(__self__, "success_threshold", success_threshold)
if tcp_socket is not None:
pulumi.set(__self__, "tcp_socket", tcp_socket)
if timeout_seconds is not None:
pulumi.set(__self__, "timeout_seconds", timeout_seconds)
@property
@pulumi.getter(name="exec")
def exec_(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLivenessProbeExecArgs']]:
"""
One and only one of the following should be specified. Exec specifies the action to take.
"""
return pulumi.get(self, "exec_")
@exec_.setter
def exec_(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLivenessProbeExecArgs']]):
pulumi.set(self, "exec_", value)
@property
@pulumi.getter(name="failureThreshold")
def failure_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
"""
return pulumi.get(self, "failure_threshold")
@failure_threshold.setter
def failure_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "failure_threshold", value)
@property
@pulumi.getter(name="httpGet")
def http_get(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLivenessProbeHttpGetArgs']]:
"""
HTTPGet specifies the http request to perform.
"""
return pulumi.get(self, "http_get")
@http_get.setter
def http_get(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLivenessProbeHttpGetArgs']]):
pulumi.set(self, "http_get", value)
@property
@pulumi.getter(name="initialDelaySeconds")
def initial_delay_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "initial_delay_seconds")
@initial_delay_seconds.setter
def initial_delay_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "initial_delay_seconds", value)
@property
@pulumi.getter(name="periodSeconds")
def period_seconds(self) -> Optional[pulumi.Input[int]]:
"""
        How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
"""
return pulumi.get(self, "period_seconds")
@period_seconds.setter
def period_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "period_seconds", value)
@property
@pulumi.getter(name="successThreshold")
def success_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
"""
return pulumi.get(self, "success_threshold")
@success_threshold.setter
def success_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "success_threshold", value)
@property
@pulumi.getter(name="tcpSocket")
def tcp_socket(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLivenessProbeTcpSocketArgs']]:
"""
TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
"""
return pulumi.get(self, "tcp_socket")
@tcp_socket.setter
def tcp_socket(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLivenessProbeTcpSocketArgs']]):
pulumi.set(self, "tcp_socket", value)
@property
@pulumi.getter(name="timeoutSeconds")
def timeout_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "timeout_seconds")
@timeout_seconds.setter
def timeout_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "timeout_seconds", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLivenessProbeExecArgs:
def __init__(__self__, *,
command: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
if command is not None:
pulumi.set(__self__, "command", command)
@property
@pulumi.getter
def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "command", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLivenessProbeHttpGetArgs:
def __init__(__self__, *,
port: pulumi.Input[Union[int, str]],
host: Optional[pulumi.Input[str]] = None,
http_headers: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLivenessProbeHttpGetHttpHeadersArgs']]]] = None,
path: Optional[pulumi.Input[str]] = None,
scheme: Optional[pulumi.Input[str]] = None):
"""
HTTPGet specifies the http request to perform.
:param pulumi.Input[Union[int, str]] port: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLivenessProbeHttpGetHttpHeadersArgs']]] http_headers: Custom headers to set in the request. HTTP allows repeated headers.
:param pulumi.Input[str] path: Path to access on the HTTP server.
:param pulumi.Input[str] scheme: Scheme to use for connecting to the host. Defaults to HTTP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
if http_headers is not None:
pulumi.set(__self__, "http_headers", http_headers)
if path is not None:
pulumi.set(__self__, "path", path)
if scheme is not None:
pulumi.set(__self__, "scheme", scheme)
@property
@pulumi.getter
def port(self) -> pulumi.Input[Union[int, str]]:
"""
Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input[Union[int, str]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@property
@pulumi.getter(name="httpHeaders")
def http_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLivenessProbeHttpGetHttpHeadersArgs']]]]:
"""
Custom headers to set in the request. HTTP allows repeated headers.
"""
return pulumi.get(self, "http_headers")
@http_headers.setter
def http_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLivenessProbeHttpGetHttpHeadersArgs']]]]):
pulumi.set(self, "http_headers", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
Path to access on the HTTP server.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def scheme(self) -> Optional[pulumi.Input[str]]:
"""
Scheme to use for connecting to the host. Defaults to HTTP.
"""
return pulumi.get(self, "scheme")
@scheme.setter
def scheme(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scheme", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLivenessProbeHttpGetHttpHeadersArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
"""
HTTPHeader describes a custom header to be used in HTTP probes
:param pulumi.Input[str] name: The header field name
:param pulumi.Input[str] value: The header field value
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The header field name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The header field value
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersLivenessProbeTcpSocketArgs:
def __init__(__self__, *,
port: pulumi.Input[Union[int, str]],
host: Optional[pulumi.Input[str]] = None):
"""
TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
:param pulumi.Input[Union[int, str]] port: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Optional: Host name to connect to, defaults to the pod IP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
@property
@pulumi.getter
def port(self) -> pulumi.Input[Union[int, str]]:
"""
Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input[Union[int, str]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Optional: Host name to connect to, defaults to the pod IP.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersPortsArgs:
def __init__(__self__, *,
container_port: pulumi.Input[int],
protocol: pulumi.Input[str],
host_ip: Optional[pulumi.Input[str]] = None,
host_port: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None):
"""
ContainerPort represents a network port in a single container.
        :param pulumi.Input[int] container_port: Number of the port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.
:param pulumi.Input[str] protocol: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP".
:param pulumi.Input[str] host_ip: What host IP to bind the external port to.
        :param pulumi.Input[int] host_port: Number of the port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.
:param pulumi.Input[str] name: If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.
"""
pulumi.set(__self__, "container_port", container_port)
pulumi.set(__self__, "protocol", protocol)
if host_ip is not None:
pulumi.set(__self__, "host_ip", host_ip)
if host_port is not None:
pulumi.set(__self__, "host_port", host_port)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="containerPort")
def container_port(self) -> pulumi.Input[int]:
"""
        Number of the port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.
"""
return pulumi.get(self, "container_port")
@container_port.setter
def container_port(self, value: pulumi.Input[int]):
pulumi.set(self, "container_port", value)
@property
@pulumi.getter
def protocol(self) -> pulumi.Input[str]:
"""
Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP".
"""
return pulumi.get(self, "protocol")
@protocol.setter
def protocol(self, value: pulumi.Input[str]):
pulumi.set(self, "protocol", value)
@property
@pulumi.getter(name="hostIP")
def host_ip(self) -> Optional[pulumi.Input[str]]:
"""
What host IP to bind the external port to.
"""
return pulumi.get(self, "host_ip")
@host_ip.setter
def host_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host_ip", value)
@property
@pulumi.getter(name="hostPort")
def host_port(self) -> Optional[pulumi.Input[int]]:
"""
        Number of the port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.
"""
return pulumi.get(self, "host_port")
@host_port.setter
def host_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "host_port", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersReadinessProbeArgs:
def __init__(__self__, *,
exec_: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersReadinessProbeExecArgs']] = None,
failure_threshold: Optional[pulumi.Input[int]] = None,
http_get: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersReadinessProbeHttpGetArgs']] = None,
initial_delay_seconds: Optional[pulumi.Input[int]] = None,
period_seconds: Optional[pulumi.Input[int]] = None,
success_threshold: Optional[pulumi.Input[int]] = None,
tcp_socket: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersReadinessProbeTcpSocketArgs']] = None,
timeout_seconds: Optional[pulumi.Input[int]] = None):
"""
Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersReadinessProbeExecArgs'] exec_: One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[int] failure_threshold: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersReadinessProbeHttpGetArgs'] http_get: HTTPGet specifies the http request to perform.
        :param pulumi.Input[int] initial_delay_seconds: Number of seconds after the container has started before readiness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
        :param pulumi.Input[int] period_seconds: How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
:param pulumi.Input[int] success_threshold: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersReadinessProbeTcpSocketArgs'] tcp_socket: TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
:param pulumi.Input[int] timeout_seconds: Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
if exec_ is not None:
pulumi.set(__self__, "exec_", exec_)
if failure_threshold is not None:
pulumi.set(__self__, "failure_threshold", failure_threshold)
if http_get is not None:
pulumi.set(__self__, "http_get", http_get)
if initial_delay_seconds is not None:
pulumi.set(__self__, "initial_delay_seconds", initial_delay_seconds)
if period_seconds is not None:
pulumi.set(__self__, "period_seconds", period_seconds)
if success_threshold is not None:
pulumi.set(__self__, "success_threshold", success_threshold)
if tcp_socket is not None:
pulumi.set(__self__, "tcp_socket", tcp_socket)
if timeout_seconds is not None:
pulumi.set(__self__, "timeout_seconds", timeout_seconds)
@property
@pulumi.getter(name="exec")
def exec_(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersReadinessProbeExecArgs']]:
"""
Exec specifies the action to take. Only one probe handler (exec_, http_get, or tcp_socket) should be specified.
"""
return pulumi.get(self, "exec_")
@exec_.setter
def exec_(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersReadinessProbeExecArgs']]):
pulumi.set(self, "exec_", value)
@property
@pulumi.getter(name="failureThreshold")
def failure_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
"""
return pulumi.get(self, "failure_threshold")
@failure_threshold.setter
def failure_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "failure_threshold", value)
@property
@pulumi.getter(name="httpGet")
def http_get(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersReadinessProbeHttpGetArgs']]:
"""
HTTPGet specifies the http request to perform.
"""
return pulumi.get(self, "http_get")
@http_get.setter
def http_get(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersReadinessProbeHttpGetArgs']]):
pulumi.set(self, "http_get", value)
@property
@pulumi.getter(name="initialDelaySeconds")
def initial_delay_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Number of seconds after the container has started before the probe is initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "initial_delay_seconds")
@initial_delay_seconds.setter
def initial_delay_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "initial_delay_seconds", value)
@property
@pulumi.getter(name="periodSeconds")
def period_seconds(self) -> Optional[pulumi.Input[int]]:
"""
How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
"""
return pulumi.get(self, "period_seconds")
@period_seconds.setter
def period_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "period_seconds", value)
@property
@pulumi.getter(name="successThreshold")
def success_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
"""
return pulumi.get(self, "success_threshold")
@success_threshold.setter
def success_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "success_threshold", value)
@property
@pulumi.getter(name="tcpSocket")
def tcp_socket(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersReadinessProbeTcpSocketArgs']]:
"""
TCPSocket specifies an action involving a TCP port.
"""
return pulumi.get(self, "tcp_socket")
@tcp_socket.setter
def tcp_socket(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersReadinessProbeTcpSocketArgs']]):
pulumi.set(self, "tcp_socket", value)
@property
@pulumi.getter(name="timeoutSeconds")
def timeout_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "timeout_seconds")
@timeout_seconds.setter
def timeout_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "timeout_seconds", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersReadinessProbeExecArgs:
def __init__(__self__, *,
command: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Exec specifies the action to take. Only one probe handler (exec_, http_get, or tcp_socket) should be specified.
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Command is the command line to execute inside the container; the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd; it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
if command is not None:
pulumi.set(__self__, "command", command)
@property
@pulumi.getter
def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Command is the command line to execute inside the container; the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd; it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "command", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersReadinessProbeHttpGetArgs:
def __init__(__self__, *,
port: pulumi.Input[Union[int, str]],
host: Optional[pulumi.Input[str]] = None,
http_headers: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersReadinessProbeHttpGetHttpHeadersArgs']]]] = None,
path: Optional[pulumi.Input[str]] = None,
scheme: Optional[pulumi.Input[str]] = None):
"""
HTTPGet specifies the http request to perform.
:param pulumi.Input[Union[int, str]] port: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersReadinessProbeHttpGetHttpHeadersArgs']]] http_headers: Custom headers to set in the request. HTTP allows repeated headers.
:param pulumi.Input[str] path: Path to access on the HTTP server.
:param pulumi.Input[str] scheme: Scheme to use for connecting to the host. Defaults to HTTP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
if http_headers is not None:
pulumi.set(__self__, "http_headers", http_headers)
if path is not None:
pulumi.set(__self__, "path", path)
if scheme is not None:
pulumi.set(__self__, "scheme", scheme)
@property
@pulumi.getter
def port(self) -> pulumi.Input[Union[int, str]]:
"""
Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input[Union[int, str]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@property
@pulumi.getter(name="httpHeaders")
def http_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersReadinessProbeHttpGetHttpHeadersArgs']]]]:
"""
Custom headers to set in the request. HTTP allows repeated headers.
"""
return pulumi.get(self, "http_headers")
@http_headers.setter
def http_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersReadinessProbeHttpGetHttpHeadersArgs']]]]):
pulumi.set(self, "http_headers", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
Path to access on the HTTP server.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def scheme(self) -> Optional[pulumi.Input[str]]:
"""
Scheme to use for connecting to the host. Defaults to HTTP.
"""
return pulumi.get(self, "scheme")
@scheme.setter
def scheme(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scheme", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersReadinessProbeHttpGetHttpHeadersArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
"""
HTTPHeader describes a custom header to be used in HTTP probes
:param pulumi.Input[str] name: The header field name
:param pulumi.Input[str] value: The header field value
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The header field name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The header field value
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersReadinessProbeTcpSocketArgs:
def __init__(__self__, *,
port: pulumi.Input[Union[int, str]],
host: Optional[pulumi.Input[str]] = None):
"""
TCPSocket specifies an action involving a TCP port.
:param pulumi.Input[Union[int, str]] port: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Optional: Host name to connect to, defaults to the pod IP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
@property
@pulumi.getter
def port(self) -> pulumi.Input[Union[int, str]]:
"""
Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input[Union[int, str]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Optional: Host name to connect to, defaults to the pod IP.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersResourcesArgs:
def __init__(__self__, *,
limits: Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersResourcesLimitsArgs']]]] = None,
requests: Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersResourcesRequestsArgs']]]] = None):
"""
Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
:param pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersResourcesLimitsArgs']]] limits: Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
:param pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersResourcesRequestsArgs']]] requests: Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
if limits is not None:
pulumi.set(__self__, "limits", limits)
if requests is not None:
pulumi.set(__self__, "requests", requests)
@property
@pulumi.getter
def limits(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersResourcesLimitsArgs']]]]:
"""
Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
return pulumi.get(self, "limits")
@limits.setter
def limits(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersResourcesLimitsArgs']]]]):
pulumi.set(self, "limits", value)
@property
@pulumi.getter
def requests(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersResourcesRequestsArgs']]]]:
"""
Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
return pulumi.get(self, "requests")
@requests.setter
def requests(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersResourcesRequestsArgs']]]]):
pulumi.set(self, "requests", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersResourcesLimitsArgs:
def __init__(__self__):
pass
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersResourcesRequestsArgs:
def __init__(__self__):
pass
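# Note: the ...ResourcesLimitsArgs and ...ResourcesRequestsArgs value classes
# above carry no fields, so the limits/requests mapping values are opaque in
# this generated schema. A minimal sketch, assuming the conventional
# Kubernetes resource names as keys:
#
#   resources = SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersResourcesArgs(
#       limits={"cpu": SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersResourcesLimitsArgs()},
#       requests={"memory": SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersResourcesRequestsArgs()},
#   )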
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersSecurityContextArgs:
def __init__(__self__, *,
allow_privilege_escalation: Optional[pulumi.Input[bool]] = None,
capabilities: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersSecurityContextCapabilitiesArgs']] = None,
privileged: Optional[pulumi.Input[bool]] = None,
proc_mount: Optional[pulumi.Input[str]] = None,
read_only_root_filesystem: Optional[pulumi.Input[bool]] = None,
run_as_group: Optional[pulumi.Input[int]] = None,
run_as_non_root: Optional[pulumi.Input[bool]] = None,
run_as_user: Optional[pulumi.Input[int]] = None,
se_linux_options: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersSecurityContextSeLinuxOptionsArgs']] = None,
windows_options: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersSecurityContextWindowsOptionsArgs']] = None):
"""
Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
:param pulumi.Input[bool] allow_privilege_escalation: AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls whether the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is always true when the container is: 1) run as Privileged, or 2) has CAP_SYS_ADMIN.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersSecurityContextCapabilitiesArgs'] capabilities: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.
:param pulumi.Input[bool] privileged: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.
:param pulumi.Input[str] proc_mount: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled.
:param pulumi.Input[bool] read_only_root_filesystem: Whether this container has a read-only root filesystem. Default is false.
:param pulumi.Input[int] run_as_group: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param pulumi.Input[bool] run_as_non_root: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param pulumi.Input[int] run_as_user: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersSecurityContextSeLinuxOptionsArgs'] se_linux_options: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersSecurityContextWindowsOptionsArgs'] windows_options: The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
if allow_privilege_escalation is not None:
pulumi.set(__self__, "allow_privilege_escalation", allow_privilege_escalation)
if capabilities is not None:
pulumi.set(__self__, "capabilities", capabilities)
if privileged is not None:
pulumi.set(__self__, "privileged", privileged)
if proc_mount is not None:
pulumi.set(__self__, "proc_mount", proc_mount)
if read_only_root_filesystem is not None:
pulumi.set(__self__, "read_only_root_filesystem", read_only_root_filesystem)
if run_as_group is not None:
pulumi.set(__self__, "run_as_group", run_as_group)
if run_as_non_root is not None:
pulumi.set(__self__, "run_as_non_root", run_as_non_root)
if run_as_user is not None:
pulumi.set(__self__, "run_as_user", run_as_user)
if se_linux_options is not None:
pulumi.set(__self__, "se_linux_options", se_linux_options)
if windows_options is not None:
pulumi.set(__self__, "windows_options", windows_options)
@property
@pulumi.getter(name="allowPrivilegeEscalation")
def allow_privilege_escalation(self) -> Optional[pulumi.Input[bool]]:
"""
AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls whether the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is always true when the container is: 1) run as Privileged, or 2) has CAP_SYS_ADMIN.
"""
return pulumi.get(self, "allow_privilege_escalation")
@allow_privilege_escalation.setter
def allow_privilege_escalation(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "allow_privilege_escalation", value)
@property
@pulumi.getter
def capabilities(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersSecurityContextCapabilitiesArgs']]:
"""
The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.
"""
return pulumi.get(self, "capabilities")
@capabilities.setter
def capabilities(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersSecurityContextCapabilitiesArgs']]):
pulumi.set(self, "capabilities", value)
@property
@pulumi.getter
def privileged(self) -> Optional[pulumi.Input[bool]]:
"""
Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.
"""
return pulumi.get(self, "privileged")
@privileged.setter
def privileged(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "privileged", value)
@property
@pulumi.getter(name="procMount")
def proc_mount(self) -> Optional[pulumi.Input[str]]:
"""
procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled.
"""
return pulumi.get(self, "proc_mount")
@proc_mount.setter
def proc_mount(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "proc_mount", value)
@property
@pulumi.getter(name="readOnlyRootFilesystem")
def read_only_root_filesystem(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this container has a read-only root filesystem. Default is false.
"""
return pulumi.get(self, "read_only_root_filesystem")
@read_only_root_filesystem.setter
def read_only_root_filesystem(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "read_only_root_filesystem", value)
@property
@pulumi.getter(name="runAsGroup")
def run_as_group(self) -> Optional[pulumi.Input[int]]:
"""
The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "run_as_group")
@run_as_group.setter
def run_as_group(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "run_as_group", value)
@property
@pulumi.getter(name="runAsNonRoot")
def run_as_non_root(self) -> Optional[pulumi.Input[bool]]:
"""
Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "run_as_non_root")
@run_as_non_root.setter
def run_as_non_root(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "run_as_non_root", value)
@property
@pulumi.getter(name="runAsUser")
def run_as_user(self) -> Optional[pulumi.Input[int]]:
"""
The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "run_as_user")
@run_as_user.setter
def run_as_user(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "run_as_user", value)
@property
@pulumi.getter(name="seLinuxOptions")
def se_linux_options(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersSecurityContextSeLinuxOptionsArgs']]:
"""
The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "se_linux_options")
@se_linux_options.setter
def se_linux_options(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersSecurityContextSeLinuxOptionsArgs']]):
pulumi.set(self, "se_linux_options", value)
@property
@pulumi.getter(name="windowsOptions")
def windows_options(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersSecurityContextWindowsOptionsArgs']]:
"""
The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "windows_options")
@windows_options.setter
def windows_options(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersSecurityContextWindowsOptionsArgs']]):
pulumi.set(self, "windows_options", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersSecurityContextCapabilitiesArgs:
def __init__(__self__, *,
add: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
drop: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.
:param pulumi.Input[Sequence[pulumi.Input[str]]] add: Added capabilities
:param pulumi.Input[Sequence[pulumi.Input[str]]] drop: Removed capabilities
"""
if add is not None:
pulumi.set(__self__, "add", add)
if drop is not None:
pulumi.set(__self__, "drop", drop)
@property
@pulumi.getter
def add(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Added capabilities
"""
return pulumi.get(self, "add")
@add.setter
def add(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "add", value)
@property
@pulumi.getter
def drop(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Removed capabilities
"""
return pulumi.get(self, "drop")
@drop.setter
def drop(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "drop", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersSecurityContextSeLinuxOptionsArgs:
def __init__(__self__, *,
level: Optional[pulumi.Input[str]] = None,
role: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
user: Optional[pulumi.Input[str]] = None):
"""
The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param pulumi.Input[str] level: Level is the SELinux level label that applies to the container.
:param pulumi.Input[str] role: Role is a SELinux role label that applies to the container.
:param pulumi.Input[str] type: Type is a SELinux type label that applies to the container.
:param pulumi.Input[str] user: User is a SELinux user label that applies to the container.
"""
if level is not None:
pulumi.set(__self__, "level", level)
if role is not None:
pulumi.set(__self__, "role", role)
if type is not None:
pulumi.set(__self__, "type", type)
if user is not None:
pulumi.set(__self__, "user", user)
@property
@pulumi.getter
def level(self) -> Optional[pulumi.Input[str]]:
"""
Level is the SELinux level label that applies to the container.
"""
return pulumi.get(self, "level")
@level.setter
def level(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "level", value)
@property
@pulumi.getter
def role(self) -> Optional[pulumi.Input[str]]:
"""
Role is a SELinux role label that applies to the container.
"""
return pulumi.get(self, "role")
@role.setter
def role(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
Type is a SELinux type label that applies to the container.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def user(self) -> Optional[pulumi.Input[str]]:
"""
User is a SELinux user label that applies to the container.
"""
return pulumi.get(self, "user")
@user.setter
def user(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersSecurityContextWindowsOptionsArgs:
def __init__(__self__, *,
gmsa_credential_spec: Optional[pulumi.Input[str]] = None,
gmsa_credential_spec_name: Optional[pulumi.Input[str]] = None,
run_as_user_name: Optional[pulumi.Input[str]] = None):
"""
The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param pulumi.Input[str] gmsa_credential_spec: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.
:param pulumi.Input[str] gmsa_credential_spec_name: GMSACredentialSpecName is the name of the GMSA credential spec to use.
:param pulumi.Input[str] run_as_user_name: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
if gmsa_credential_spec is not None:
pulumi.set(__self__, "gmsa_credential_spec", gmsa_credential_spec)
if gmsa_credential_spec_name is not None:
pulumi.set(__self__, "gmsa_credential_spec_name", gmsa_credential_spec_name)
if run_as_user_name is not None:
pulumi.set(__self__, "run_as_user_name", run_as_user_name)
@property
@pulumi.getter(name="gmsaCredentialSpec")
def gmsa_credential_spec(self) -> Optional[pulumi.Input[str]]:
"""
GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.
"""
return pulumi.get(self, "gmsa_credential_spec")
@gmsa_credential_spec.setter
def gmsa_credential_spec(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "gmsa_credential_spec", value)
@property
@pulumi.getter(name="gmsaCredentialSpecName")
def gmsa_credential_spec_name(self) -> Optional[pulumi.Input[str]]:
"""
GMSACredentialSpecName is the name of the GMSA credential spec to use.
"""
return pulumi.get(self, "gmsa_credential_spec_name")
@gmsa_credential_spec_name.setter
def gmsa_credential_spec_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "gmsa_credential_spec_name", value)
@property
@pulumi.getter(name="runAsUserName")
def run_as_user_name(self) -> Optional[pulumi.Input[str]]:
"""
The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "run_as_user_name")
@run_as_user_name.setter
def run_as_user_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "run_as_user_name", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeArgs:
def __init__(__self__, *,
exec_: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeExecArgs']] = None,
failure_threshold: Optional[pulumi.Input[int]] = None,
http_get: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeHttpGetArgs']] = None,
initial_delay_seconds: Optional[pulumi.Input[int]] = None,
period_seconds: Optional[pulumi.Input[int]] = None,
success_threshold: Optional[pulumi.Input[int]] = None,
tcp_socket: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeTcpSocketArgs']] = None,
timeout_seconds: Optional[pulumi.Input[int]] = None):
"""
StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. This is a beta feature enabled by the StartupProbe feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeExecArgs'] exec_: Exec specifies the action to take. Only one probe handler (exec_, http_get, or tcp_socket) should be specified.
:param pulumi.Input[int] failure_threshold: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeHttpGetArgs'] http_get: HTTPGet specifies the http request to perform.
:param pulumi.Input[int] initial_delay_seconds: Number of seconds after the container has started before the probe is initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input[int] period_seconds: How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
:param pulumi.Input[int] success_threshold: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeTcpSocketArgs'] tcp_socket: TCPSocket specifies an action involving a TCP port.
:param pulumi.Input[int] timeout_seconds: Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
if exec_ is not None:
pulumi.set(__self__, "exec_", exec_)
if failure_threshold is not None:
pulumi.set(__self__, "failure_threshold", failure_threshold)
if http_get is not None:
pulumi.set(__self__, "http_get", http_get)
if initial_delay_seconds is not None:
pulumi.set(__self__, "initial_delay_seconds", initial_delay_seconds)
if period_seconds is not None:
pulumi.set(__self__, "period_seconds", period_seconds)
if success_threshold is not None:
pulumi.set(__self__, "success_threshold", success_threshold)
if tcp_socket is not None:
pulumi.set(__self__, "tcp_socket", tcp_socket)
if timeout_seconds is not None:
pulumi.set(__self__, "timeout_seconds", timeout_seconds)
@property
@pulumi.getter(name="exec")
def exec_(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeExecArgs']]:
"""
Exec specifies the action to take. Only one probe handler (exec_, http_get, or tcp_socket) should be specified.
"""
return pulumi.get(self, "exec_")
@exec_.setter
def exec_(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeExecArgs']]):
pulumi.set(self, "exec_", value)
@property
@pulumi.getter(name="failureThreshold")
def failure_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
"""
return pulumi.get(self, "failure_threshold")
@failure_threshold.setter
def failure_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "failure_threshold", value)
@property
@pulumi.getter(name="httpGet")
def http_get(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeHttpGetArgs']]:
"""
HTTPGet specifies the http request to perform.
"""
return pulumi.get(self, "http_get")
@http_get.setter
def http_get(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeHttpGetArgs']]):
pulumi.set(self, "http_get", value)
@property
@pulumi.getter(name="initialDelaySeconds")
def initial_delay_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Number of seconds after the container has started before the probe is initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "initial_delay_seconds")
@initial_delay_seconds.setter
def initial_delay_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "initial_delay_seconds", value)
@property
@pulumi.getter(name="periodSeconds")
def period_seconds(self) -> Optional[pulumi.Input[int]]:
"""
How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
"""
return pulumi.get(self, "period_seconds")
@period_seconds.setter
def period_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "period_seconds", value)
@property
@pulumi.getter(name="successThreshold")
def success_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
"""
return pulumi.get(self, "success_threshold")
@success_threshold.setter
def success_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "success_threshold", value)
@property
@pulumi.getter(name="tcpSocket")
def tcp_socket(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeTcpSocketArgs']]:
"""
TCPSocket specifies an action involving a TCP port.
"""
return pulumi.get(self, "tcp_socket")
@tcp_socket.setter
def tcp_socket(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeTcpSocketArgs']]):
pulumi.set(self, "tcp_socket", value)
@property
@pulumi.getter(name="timeoutSeconds")
def timeout_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "timeout_seconds")
@timeout_seconds.setter
def timeout_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "timeout_seconds", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeExecArgs:
def __init__(__self__, *,
command: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Exec specifies the action to take. Only one probe handler (exec_, http_get, or tcp_socket) should be specified.
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Command is the command line to execute inside the container; the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd; it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
if command is not None:
pulumi.set(__self__, "command", command)
@property
@pulumi.getter
def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Command is the command line to execute inside the container; the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd; it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "command", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeHttpGetArgs:
def __init__(__self__, *,
port: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeHttpGetPortArgs'],
host: Optional[pulumi.Input[str]] = None,
http_headers: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeHttpGetHttpHeadersArgs']]]] = None,
path: Optional[pulumi.Input[str]] = None,
scheme: Optional[pulumi.Input[str]] = None):
"""
HTTPGet specifies the http request to perform.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeHttpGetPortArgs'] port: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeHttpGetHttpHeadersArgs']]] http_headers: Custom headers to set in the request. HTTP allows repeated headers.
:param pulumi.Input[str] path: Path to access on the HTTP server.
:param pulumi.Input[str] scheme: Scheme to use for connecting to the host. Defaults to HTTP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
if http_headers is not None:
pulumi.set(__self__, "http_headers", http_headers)
if path is not None:
pulumi.set(__self__, "path", path)
if scheme is not None:
pulumi.set(__self__, "scheme", scheme)
@property
@pulumi.getter
def port(self) -> pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeHttpGetPortArgs']:
"""
Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeHttpGetPortArgs']):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@property
@pulumi.getter(name="httpHeaders")
def http_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeHttpGetHttpHeadersArgs']]]]:
"""
Custom headers to set in the request. HTTP allows repeated headers.
"""
return pulumi.get(self, "http_headers")
@http_headers.setter
def http_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeHttpGetHttpHeadersArgs']]]]):
pulumi.set(self, "http_headers", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
Path to access on the HTTP server.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def scheme(self) -> Optional[pulumi.Input[str]]:
"""
Scheme to use for connecting to the host. Defaults to HTTP.
"""
return pulumi.get(self, "scheme")
@scheme.setter
def scheme(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scheme", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeHttpGetHttpHeadersArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
"""
HTTPHeader describes a custom header to be used in HTTP probes
:param pulumi.Input[str] name: The header field name
:param pulumi.Input[str] value: The header field value
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The header field name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The header field value
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeHttpGetPortArgs:
def __init__(__self__):
pass
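# Note: unlike the readiness-probe HTTP GET above, whose `port` is typed
# pulumi.Input[Union[int, str]], this startup-probe port is modeled by the
# empty wrapper class above, so no port number or IANA_SVC_NAME can be
# expressed through it in this generated schema.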
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeTcpSocketArgs:
def __init__(__self__, *,
port: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeTcpSocketPortArgs'],
host: Optional[pulumi.Input[str]] = None):
"""
TCPSocket specifies an action involving a TCP port.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeTcpSocketPortArgs'] port: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Optional: Host name to connect to, defaults to the pod IP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
@property
@pulumi.getter
def port(self) -> pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeTcpSocketPortArgs']:
"""
Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeTcpSocketPortArgs']):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Optional: Host name to connect to, defaults to the pod IP.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeTcpSocketPortArgs:
def __init__(__self__):
pass
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersVolumeDevicesArgs:
def __init__(__self__, *,
device_path: pulumi.Input[str],
name: pulumi.Input[str]):
"""
volumeDevice describes a mapping of a raw block device within a container.
:param pulumi.Input[str] device_path: devicePath is the path inside of the container that the device will be mapped to.
:param pulumi.Input[str] name: name must match the name of a persistentVolumeClaim in the pod
"""
pulumi.set(__self__, "device_path", device_path)
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="devicePath")
def device_path(self) -> pulumi.Input[str]:
"""
devicePath is the path inside of the container that the device will be mapped to.
"""
return pulumi.get(self, "device_path")
@device_path.setter
def device_path(self, value: pulumi.Input[str]):
pulumi.set(self, "device_path", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
name must match the name of a persistentVolumeClaim in the pod
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersVolumeMountsArgs:
def __init__(__self__, *,
mount_path: pulumi.Input[str],
name: pulumi.Input[str],
mount_propagation: Optional[pulumi.Input[str]] = None,
read_only: Optional[pulumi.Input[bool]] = None,
sub_path: Optional[pulumi.Input[str]] = None,
sub_path_expr: Optional[pulumi.Input[str]] = None):
"""
VolumeMount describes a mounting of a Volume within a container.
:param pulumi.Input[str] mount_path: Path within the container at which the volume should be mounted. Must not contain ':'.
:param pulumi.Input[str] name: This must match the Name of a Volume.
:param pulumi.Input[str] mount_propagation: mountPropagation determines how mounts are propagated from the host to the container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
:param pulumi.Input[bool] read_only: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
:param pulumi.Input[str] sub_path: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
:param pulumi.Input[str] sub_path_expr: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive.
"""
pulumi.set(__self__, "mount_path", mount_path)
pulumi.set(__self__, "name", name)
if mount_propagation is not None:
pulumi.set(__self__, "mount_propagation", mount_propagation)
if read_only is not None:
pulumi.set(__self__, "read_only", read_only)
if sub_path is not None:
pulumi.set(__self__, "sub_path", sub_path)
if sub_path_expr is not None:
pulumi.set(__self__, "sub_path_expr", sub_path_expr)
@property
@pulumi.getter(name="mountPath")
def mount_path(self) -> pulumi.Input[str]:
"""
Path within the container at which the volume should be mounted. Must not contain ':'.
"""
return pulumi.get(self, "mount_path")
@mount_path.setter
def mount_path(self, value: pulumi.Input[str]):
pulumi.set(self, "mount_path", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
This must match the Name of a Volume.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="mountPropagation")
def mount_propagation(self) -> Optional[pulumi.Input[str]]:
"""
mountPropagation determines how mounts are propagated from the host to the container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
"""
return pulumi.get(self, "mount_propagation")
@mount_propagation.setter
def mount_propagation(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mount_propagation", value)
@property
@pulumi.getter(name="readOnly")
def read_only(self) -> Optional[pulumi.Input[bool]]:
"""
Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
"""
return pulumi.get(self, "read_only")
@read_only.setter
def read_only(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "read_only", value)
@property
@pulumi.getter(name="subPath")
def sub_path(self) -> Optional[pulumi.Input[str]]:
"""
Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
"""
return pulumi.get(self, "sub_path")
@sub_path.setter
def sub_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sub_path", value)
@property
@pulumi.getter(name="subPathExpr")
def sub_path_expr(self) -> Optional[pulumi.Input[str]]:
"""
Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive.
"""
return pulumi.get(self, "sub_path_expr")
@sub_path_expr.setter
def sub_path_expr(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sub_path_expr", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecOverheadArgs:
def __init__(__self__):
pass
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecReadinessGatesArgs:
def __init__(__self__, *,
condition_type: pulumi.Input[str]):
"""
PodReadinessGate contains the reference to a pod condition
:param pulumi.Input[str] condition_type: ConditionType refers to a condition in the pod's condition list with matching type.
"""
pulumi.set(__self__, "condition_type", condition_type)
@property
@pulumi.getter(name="conditionType")
def condition_type(self) -> pulumi.Input[str]:
"""
ConditionType refers to a condition in the pod's condition list with matching type.
"""
return pulumi.get(self, "condition_type")
@condition_type.setter
def condition_type(self, value: pulumi.Input[str]):
pulumi.set(self, "condition_type", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecSecurityContextArgs:
def __init__(__self__, *,
fs_group: Optional[pulumi.Input[int]] = None,
fs_group_change_policy: Optional[pulumi.Input[str]] = None,
run_as_group: Optional[pulumi.Input[int]] = None,
run_as_non_root: Optional[pulumi.Input[bool]] = None,
run_as_user: Optional[pulumi.Input[int]] = None,
se_linux_options: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecSecurityContextSeLinuxOptionsArgs']] = None,
supplemental_groups: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,
sysctls: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecSecurityContextSysctlsArgs']]]] = None,
windows_options: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecSecurityContextWindowsOptionsArgs']] = None):
"""
SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.
:param pulumi.Input[int] fs_group: A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:
1. The owning GID will be the FSGroup
2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
3. The permission bits are OR'd with rw-rw----
If unset, the Kubelet will not modify the ownership and permissions of any volume.
:param pulumi.Input[str] fs_group_change_policy: fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside the Pod. This field will only apply to volume types which support fsGroup based ownership (and permissions). It will have no effect on ephemeral volume types such as secret, configmaps and emptydir. Valid values are "OnRootMismatch" and "Always". If not specified, defaults to "Always".
:param pulumi.Input[int] run_as_group: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.
:param pulumi.Input[bool] run_as_non_root: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param pulumi.Input[int] run_as_user: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecSecurityContextSeLinuxOptionsArgs'] se_linux_options: The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.
:param pulumi.Input[Sequence[pulumi.Input[int]]] supplemental_groups: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecSecurityContextSysctlsArgs']]] sysctls: Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecSecurityContextWindowsOptionsArgs'] windows_options: The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
if fs_group is not None:
pulumi.set(__self__, "fs_group", fs_group)
if fs_group_change_policy is not None:
pulumi.set(__self__, "fs_group_change_policy", fs_group_change_policy)
if run_as_group is not None:
pulumi.set(__self__, "run_as_group", run_as_group)
if run_as_non_root is not None:
pulumi.set(__self__, "run_as_non_root", run_as_non_root)
if run_as_user is not None:
pulumi.set(__self__, "run_as_user", run_as_user)
if se_linux_options is not None:
pulumi.set(__self__, "se_linux_options", se_linux_options)
if supplemental_groups is not None:
pulumi.set(__self__, "supplemental_groups", supplemental_groups)
if sysctls is not None:
pulumi.set(__self__, "sysctls", sysctls)
if windows_options is not None:
pulumi.set(__self__, "windows_options", windows_options)
@property
@pulumi.getter(name="fsGroup")
def fs_group(self) -> Optional[pulumi.Input[int]]:
"""
A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:
1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----
If unset, the Kubelet will not modify the ownership and permissions of any volume.
"""
return pulumi.get(self, "fs_group")
@fs_group.setter
def fs_group(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "fs_group", value)
@property
@pulumi.getter(name="fsGroupChangePolicy")
def fs_group_change_policy(self) -> Optional[pulumi.Input[str]]:
"""
fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside the Pod. This field will only apply to volume types which support fsGroup based ownership (and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are "OnRootMismatch" and "Always". If not specified, defaults to "Always".
"""
return pulumi.get(self, "fs_group_change_policy")
@fs_group_change_policy.setter
def fs_group_change_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fs_group_change_policy", value)
@property
@pulumi.getter(name="runAsGroup")
def run_as_group(self) -> Optional[pulumi.Input[int]]:
"""
The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.
"""
return pulumi.get(self, "run_as_group")
@run_as_group.setter
def run_as_group(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "run_as_group", value)
@property
@pulumi.getter(name="runAsNonRoot")
def run_as_non_root(self) -> Optional[pulumi.Input[bool]]:
"""
Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "run_as_non_root")
@run_as_non_root.setter
def run_as_non_root(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "run_as_non_root", value)
@property
@pulumi.getter(name="runAsUser")
def run_as_user(self) -> Optional[pulumi.Input[int]]:
"""
The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.
"""
return pulumi.get(self, "run_as_user")
@run_as_user.setter
def run_as_user(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "run_as_user", value)
@property
@pulumi.getter(name="seLinuxOptions")
def se_linux_options(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecSecurityContextSeLinuxOptionsArgs']]:
"""
The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.
"""
return pulumi.get(self, "se_linux_options")
@se_linux_options.setter
def se_linux_options(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecSecurityContextSeLinuxOptionsArgs']]):
pulumi.set(self, "se_linux_options", value)
@property
@pulumi.getter(name="supplementalGroups")
def supplemental_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]:
"""
A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.
"""
return pulumi.get(self, "supplemental_groups")
@supplemental_groups.setter
def supplemental_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]):
pulumi.set(self, "supplemental_groups", value)
@property
@pulumi.getter
def sysctls(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecSecurityContextSysctlsArgs']]]]:
"""
Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch.
"""
return pulumi.get(self, "sysctls")
@sysctls.setter
def sysctls(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecSecurityContextSysctlsArgs']]]]):
pulumi.set(self, "sysctls", value)
@property
@pulumi.getter(name="windowsOptions")
def windows_options(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecSecurityContextWindowsOptionsArgs']]:
"""
The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "windows_options")
@windows_options.setter
def windows_options(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecSecurityContextWindowsOptionsArgs']]):
pulumi.set(self, "windows_options", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecSecurityContextSeLinuxOptionsArgs:
def __init__(__self__, *,
level: Optional[pulumi.Input[str]] = None,
role: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
user: Optional[pulumi.Input[str]] = None):
"""
The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.
:param pulumi.Input[str] level: Level is the SELinux level label that applies to the container.
:param pulumi.Input[str] role: Role is a SELinux role label that applies to the container.
:param pulumi.Input[str] type: Type is a SELinux type label that applies to the container.
:param pulumi.Input[str] user: User is a SELinux user label that applies to the container.
"""
if level is not None:
pulumi.set(__self__, "level", level)
if role is not None:
pulumi.set(__self__, "role", role)
if type is not None:
pulumi.set(__self__, "type", type)
if user is not None:
pulumi.set(__self__, "user", user)
@property
@pulumi.getter
def level(self) -> Optional[pulumi.Input[str]]:
"""
Level is the SELinux level label that applies to the container.
"""
return pulumi.get(self, "level")
@level.setter
def level(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "level", value)
@property
@pulumi.getter
def role(self) -> Optional[pulumi.Input[str]]:
"""
Role is a SELinux role label that applies to the container.
"""
return pulumi.get(self, "role")
@role.setter
def role(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
Type is a SELinux type label that applies to the container.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def user(self) -> Optional[pulumi.Input[str]]:
"""
User is a SELinux user label that applies to the container.
"""
return pulumi.get(self, "user")
@user.setter
def user(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecSecurityContextSysctlsArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
"""
Sysctl defines a kernel parameter to be set
:param pulumi.Input[str] name: Name of a property to set
:param pulumi.Input[str] value: Value of a property to set
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Name of a property to set
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
Value of a property to set
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecSecurityContextWindowsOptionsArgs:
def __init__(__self__, *,
gmsa_credential_spec: Optional[pulumi.Input[str]] = None,
gmsa_credential_spec_name: Optional[pulumi.Input[str]] = None,
run_as_user_name: Optional[pulumi.Input[str]] = None):
"""
The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param pulumi.Input[str] gmsa_credential_spec: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.
:param pulumi.Input[str] gmsa_credential_spec_name: GMSACredentialSpecName is the name of the GMSA credential spec to use.
:param pulumi.Input[str] run_as_user_name: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
if gmsa_credential_spec is not None:
pulumi.set(__self__, "gmsa_credential_spec", gmsa_credential_spec)
if gmsa_credential_spec_name is not None:
pulumi.set(__self__, "gmsa_credential_spec_name", gmsa_credential_spec_name)
if run_as_user_name is not None:
pulumi.set(__self__, "run_as_user_name", run_as_user_name)
@property
@pulumi.getter(name="gmsaCredentialSpec")
def gmsa_credential_spec(self) -> Optional[pulumi.Input[str]]:
"""
GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.
"""
return pulumi.get(self, "gmsa_credential_spec")
@gmsa_credential_spec.setter
def gmsa_credential_spec(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "gmsa_credential_spec", value)
@property
@pulumi.getter(name="gmsaCredentialSpecName")
def gmsa_credential_spec_name(self) -> Optional[pulumi.Input[str]]:
"""
GMSACredentialSpecName is the name of the GMSA credential spec to use.
"""
return pulumi.get(self, "gmsa_credential_spec_name")
@gmsa_credential_spec_name.setter
def gmsa_credential_spec_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "gmsa_credential_spec_name", value)
@property
@pulumi.getter(name="runAsUserName")
def run_as_user_name(self) -> Optional[pulumi.Input[str]]:
"""
The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "run_as_user_name")
@run_as_user_name.setter
def run_as_user_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "run_as_user_name", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecTolerationsArgs:
def __init__(__self__, *,
effect: Optional[pulumi.Input[str]] = None,
key: Optional[pulumi.Input[str]] = None,
operator: Optional[pulumi.Input[str]] = None,
toleration_seconds: Optional[pulumi.Input[int]] = None,
value: Optional[pulumi.Input[str]] = None):
"""
The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
:param pulumi.Input[str] effect: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
:param pulumi.Input[str] key: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
:param pulumi.Input[str] operator: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
:param pulumi.Input[int] toleration_seconds: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
:param pulumi.Input[str] value: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
"""
if effect is not None:
pulumi.set(__self__, "effect", effect)
if key is not None:
pulumi.set(__self__, "key", key)
if operator is not None:
pulumi.set(__self__, "operator", operator)
if toleration_seconds is not None:
pulumi.set(__self__, "toleration_seconds", toleration_seconds)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def effect(self) -> Optional[pulumi.Input[str]]:
"""
Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
"""
return pulumi.get(self, "effect")
@effect.setter
def effect(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "effect", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def operator(self) -> Optional[pulumi.Input[str]]:
"""
Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter(name="tolerationSeconds")
def toleration_seconds(self) -> Optional[pulumi.Input[int]]:
"""
TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
"""
return pulumi.get(self, "toleration_seconds")
@toleration_seconds.setter
def toleration_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "toleration_seconds", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecTopologySpreadConstraintsArgs:
def __init__(__self__, *,
max_skew: pulumi.Input[int],
topology_key: pulumi.Input[str],
when_unsatisfiable: pulumi.Input[str],
label_selector: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecTopologySpreadConstraintsLabelSelectorArgs']] = None):
"""
TopologySpreadConstraint specifies how to spread matching pods among the given topology.
:param pulumi.Input[int] max_skew: MaxSkew describes the degree to which pods may be unevenly distributed. It's the maximum permitted difference between the number of matching pods in any two topology domains of a given topology type. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. It's a required field. Default value is 1 and 0 is not allowed.
:param pulumi.Input[str] topology_key: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a "bucket", and try to put balanced number of pods into each bucket. It's a required field.
:param pulumi.Input[str] when_unsatisfiable: WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it - ScheduleAnyway tells the scheduler to still schedule it It's considered as "Unsatisfiable" if and only if placing incoming pod on any topology violates "MaxSkew". For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. It's a required field.
:param pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecTopologySpreadConstraintsLabelSelectorArgs'] label_selector: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.
"""
pulumi.set(__self__, "max_skew", max_skew)
pulumi.set(__self__, "topology_key", topology_key)
pulumi.set(__self__, "when_unsatisfiable", when_unsatisfiable)
if label_selector is not None:
pulumi.set(__self__, "label_selector", label_selector)
@property
@pulumi.getter(name="maxSkew")
def max_skew(self) -> pulumi.Input[int]:
"""
MaxSkew describes the degree to which pods may be unevenly distributed. It's the maximum permitted difference between the number of matching pods in any two topology domains of a given topology type. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. It's a required field. Default value is 1 and 0 is not allowed.
"""
return pulumi.get(self, "max_skew")
@max_skew.setter
def max_skew(self, value: pulumi.Input[int]):
pulumi.set(self, "max_skew", value)
@property
@pulumi.getter(name="topologyKey")
def topology_key(self) -> pulumi.Input[str]:
"""
TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a "bucket", and try to put balanced number of pods into each bucket. It's a required field.
"""
return pulumi.get(self, "topology_key")
@topology_key.setter
def topology_key(self, value: pulumi.Input[str]):
pulumi.set(self, "topology_key", value)
@property
@pulumi.getter(name="whenUnsatisfiable")
def when_unsatisfiable(self) -> pulumi.Input[str]:
"""
WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it - ScheduleAnyway tells the scheduler to still schedule it It's considered as "Unsatisfiable" if and only if placing incoming pod on any topology violates "MaxSkew". For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. It's a required field.
"""
return pulumi.get(self, "when_unsatisfiable")
@when_unsatisfiable.setter
def when_unsatisfiable(self, value: pulumi.Input[str]):
pulumi.set(self, "when_unsatisfiable", value)
@property
@pulumi.getter(name="labelSelector")
def label_selector(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecTopologySpreadConstraintsLabelSelectorArgs']]:
"""
LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.
"""
return pulumi.get(self, "label_selector")
@label_selector.setter
def label_selector(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecTopologySpreadConstraintsLabelSelectorArgs']]):
pulumi.set(self, "label_selector", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecTopologySpreadConstraintsLabelSelectorArgs:
def __init__(__self__, *,
match_expressions: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecTopologySpreadConstraintsLabelSelectorMatchExpressionsArgs']]]] = None,
match_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecTopologySpreadConstraintsLabelSelectorMatchExpressionsArgs']]] match_expressions: matchExpressions is a list of label selector requirements. The requirements are ANDed.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
if match_expressions is not None:
pulumi.set(__self__, "match_expressions", match_expressions)
if match_labels is not None:
pulumi.set(__self__, "match_labels", match_labels)
@property
@pulumi.getter(name="matchExpressions")
def match_expressions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecTopologySpreadConstraintsLabelSelectorMatchExpressionsArgs']]]]:
"""
matchExpressions is a list of label selector requirements. The requirements are ANDed.
"""
return pulumi.get(self, "match_expressions")
@match_expressions.setter
def match_expressions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsComponentSpecsSpecTopologySpreadConstraintsLabelSelectorMatchExpressionsArgs']]]]):
pulumi.set(self, "match_expressions", value)
@property
@pulumi.getter(name="matchLabels")
def match_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
return pulumi.get(self, "match_labels")
@match_labels.setter
def match_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "match_labels", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecTopologySpreadConstraintsLabelSelectorMatchExpressionsArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
operator: pulumi.Input[str],
values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
:param pulumi.Input[str] key: key is the label key that the selector applies to.
:param pulumi.Input[str] operator: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
:param pulumi.Input[Sequence[pulumi.Input[str]]] values: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "operator", operator)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
key is the label key that the selector applies to.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def operator(self) -> pulumi.Input[str]:
"""
operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: pulumi.Input[str]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
return pulumi.get(self, "values")
@values.setter
def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "values", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsEngineResourcesArgs:
def __init__(__self__, *,
limits: Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsEngineResourcesLimitsArgs']]]] = None,
requests: Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsEngineResourcesRequestsArgs']]]] = None):
"""
ResourceRequirements describes the compute resource requirements.
:param pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsEngineResourcesLimitsArgs']]] limits: Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
:param pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsEngineResourcesRequestsArgs']]] requests: Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
if limits is not None:
pulumi.set(__self__, "limits", limits)
if requests is not None:
pulumi.set(__self__, "requests", requests)
@property
@pulumi.getter
def limits(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsEngineResourcesLimitsArgs']]]]:
"""
Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
return pulumi.get(self, "limits")
@limits.setter
def limits(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsEngineResourcesLimitsArgs']]]]):
pulumi.set(self, "limits", value)
@property
@pulumi.getter
def requests(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsEngineResourcesRequestsArgs']]]]:
"""
Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
return pulumi.get(self, "requests")
@requests.setter
def requests(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsEngineResourcesRequestsArgs']]]]):
pulumi.set(self, "requests", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsEngineResourcesLimitsArgs:
def __init__(__self__):
pass
@pulumi.input_type
class SeldonDeploymentSpecPredictorsEngineResourcesRequestsArgs:
def __init__(__self__):
pass
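# Note: the two empty input types above appear to be generated placeholders.
# The underlying CRD schema models resource quantities as free-form values
# (e.g. "100m" of CPU or "1Gi" of memory), so the code generator emits no
# typed fields for them; this reading of the generator's output is an
# assumption, not documented behavior.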
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerArgs:
def __init__(__self__, *,
config: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
container_spec: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecArgs']] = None,
endpoint: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerEndpointArgs']] = None,
env_secret_ref_name: Optional[pulumi.Input[str]] = None,
model_uri: Optional[pulumi.Input[str]] = None,
service_account_name: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecArgs'] container_spec: A single application container that you want to run within a pod.
"""
if config is not None:
pulumi.set(__self__, "config", config)
if container_spec is not None:
pulumi.set(__self__, "container_spec", container_spec)
if endpoint is not None:
pulumi.set(__self__, "endpoint", endpoint)
if env_secret_ref_name is not None:
pulumi.set(__self__, "env_secret_ref_name", env_secret_ref_name)
if model_uri is not None:
pulumi.set(__self__, "model_uri", model_uri)
if service_account_name is not None:
pulumi.set(__self__, "service_account_name", service_account_name)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def config(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
return pulumi.get(self, "config")
@config.setter
def config(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "config", value)
@property
@pulumi.getter(name="containerSpec")
def container_spec(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecArgs']]:
"""
A single application container that you want to run within a pod.
"""
return pulumi.get(self, "container_spec")
@container_spec.setter
def container_spec(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecArgs']]):
pulumi.set(self, "container_spec", value)
@property
@pulumi.getter
def endpoint(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerEndpointArgs']]:
return pulumi.get(self, "endpoint")
@endpoint.setter
def endpoint(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerEndpointArgs']]):
pulumi.set(self, "endpoint", value)
@property
@pulumi.getter(name="envSecretRefName")
def env_secret_ref_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "env_secret_ref_name")
@env_secret_ref_name.setter
def env_secret_ref_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "env_secret_ref_name", value)
@property
@pulumi.getter(name="modelUri")
def model_uri(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "model_uri")
@model_uri.setter
def model_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "model_uri", value)
@property
@pulumi.getter(name="serviceAccountName")
def service_account_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "service_account_name")
@service_account_name.setter
def service_account_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_account_name", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
args: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
command: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
env: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvArgs']]]] = None,
env_from: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvFromArgs']]]] = None,
image: Optional[pulumi.Input[str]] = None,
image_pull_policy: Optional[pulumi.Input[str]] = None,
lifecycle: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecycleArgs']] = None,
liveness_probe: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLivenessProbeArgs']] = None,
ports: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecPortsArgs']]]] = None,
readiness_probe: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecReadinessProbeArgs']] = None,
resources: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecResourcesArgs']] = None,
security_context: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecSecurityContextArgs']] = None,
startup_probe: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeArgs']] = None,
stdin: Optional[pulumi.Input[bool]] = None,
stdin_once: Optional[pulumi.Input[bool]] = None,
termination_message_path: Optional[pulumi.Input[str]] = None,
termination_message_policy: Optional[pulumi.Input[str]] = None,
tty: Optional[pulumi.Input[bool]] = None,
volume_devices: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecVolumeDevicesArgs']]]] = None,
volume_mounts: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecVolumeMountsArgs']]]] = None,
working_dir: Optional[pulumi.Input[str]] = None):
"""
A single application container that you want to run within a pod.
:param pulumi.Input[str] name: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
:param pulumi.Input[Sequence[pulumi.Input[str]]] args: Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvArgs']]] env: List of environment variables to set in the container. Cannot be updated.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvFromArgs']]] env_from: List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.
:param pulumi.Input[str] image: Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.
:param pulumi.Input[str] image_pull_policy: Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecycleArgs'] lifecycle: Actions that the management system should take in response to container lifecycle events. Cannot be updated.
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLivenessProbeArgs'] liveness_probe: Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecPortsArgs']]] ports: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecReadinessProbeArgs'] readiness_probe: Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecResourcesArgs'] resources: Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecSecurityContextArgs'] security_context: Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeArgs'] startup_probe: StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. This is a beta feature enabled by the StartupProbe feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input[bool] stdin: Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.
:param pulumi.Input[bool] stdin_once: Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false.
:param pulumi.Input[str] termination_message_path: Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.
:param pulumi.Input[str] termination_message_policy: Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.
:param pulumi.Input[bool] tty: Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecVolumeDevicesArgs']]] volume_devices: volumeDevices is the list of block devices to be used by the container.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecVolumeMountsArgs']]] volume_mounts: Pod volumes to mount into the container's filesystem. Cannot be updated.
:param pulumi.Input[str] working_dir: Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.
"""
pulumi.set(__self__, "name", name)
if args is not None:
pulumi.set(__self__, "args", args)
if command is not None:
pulumi.set(__self__, "command", command)
if env is not None:
pulumi.set(__self__, "env", env)
if env_from is not None:
pulumi.set(__self__, "env_from", env_from)
if image is not None:
pulumi.set(__self__, "image", image)
if image_pull_policy is not None:
pulumi.set(__self__, "image_pull_policy", image_pull_policy)
if lifecycle is not None:
pulumi.set(__self__, "lifecycle", lifecycle)
if liveness_probe is not None:
pulumi.set(__self__, "liveness_probe", liveness_probe)
if ports is not None:
pulumi.set(__self__, "ports", ports)
if readiness_probe is not None:
pulumi.set(__self__, "readiness_probe", readiness_probe)
if resources is not None:
pulumi.set(__self__, "resources", resources)
if security_context is not None:
pulumi.set(__self__, "security_context", security_context)
if startup_probe is not None:
pulumi.set(__self__, "startup_probe", startup_probe)
if stdin is not None:
pulumi.set(__self__, "stdin", stdin)
if stdin_once is not None:
pulumi.set(__self__, "stdin_once", stdin_once)
if termination_message_path is not None:
pulumi.set(__self__, "termination_message_path", termination_message_path)
if termination_message_policy is not None:
pulumi.set(__self__, "termination_message_policy", termination_message_policy)
if tty is not None:
pulumi.set(__self__, "tty", tty)
if volume_devices is not None:
pulumi.set(__self__, "volume_devices", volume_devices)
if volume_mounts is not None:
pulumi.set(__self__, "volume_mounts", volume_mounts)
if working_dir is not None:
pulumi.set(__self__, "working_dir", working_dir)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def args(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
"""
return pulumi.get(self, "args")
@args.setter
def args(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "args", value)
@property
@pulumi.getter
def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "command", value)
@property
@pulumi.getter
def env(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvArgs']]]]:
"""
List of environment variables to set in the container. Cannot be updated.
"""
return pulumi.get(self, "env")
@env.setter
def env(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvArgs']]]]):
pulumi.set(self, "env", value)
@property
@pulumi.getter(name="envFrom")
def env_from(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvFromArgs']]]]:
"""
List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.
"""
return pulumi.get(self, "env_from")
@env_from.setter
def env_from(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvFromArgs']]]]):
pulumi.set(self, "env_from", value)
@property
@pulumi.getter
def image(self) -> Optional[pulumi.Input[str]]:
"""
Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.
"""
return pulumi.get(self, "image")
@image.setter
def image(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image", value)
@property
@pulumi.getter(name="imagePullPolicy")
def image_pull_policy(self) -> Optional[pulumi.Input[str]]:
"""
Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
"""
return pulumi.get(self, "image_pull_policy")
@image_pull_policy.setter
def image_pull_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image_pull_policy", value)
@property
@pulumi.getter
def lifecycle(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecycleArgs']]:
"""
Actions that the management system should take in response to container lifecycle events. Cannot be updated.
"""
return pulumi.get(self, "lifecycle")
@lifecycle.setter
def lifecycle(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecycleArgs']]):
pulumi.set(self, "lifecycle", value)
@property
@pulumi.getter(name="livenessProbe")
def liveness_probe(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLivenessProbeArgs']]:
"""
Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "liveness_probe")
@liveness_probe.setter
def liveness_probe(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLivenessProbeArgs']]):
pulumi.set(self, "liveness_probe", value)
@property
@pulumi.getter
def ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecPortsArgs']]]]:
"""
List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
"""
return pulumi.get(self, "ports")
@ports.setter
def ports(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecPortsArgs']]]]):
pulumi.set(self, "ports", value)
@property
@pulumi.getter(name="readinessProbe")
def readiness_probe(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecReadinessProbeArgs']]:
"""
Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "readiness_probe")
@readiness_probe.setter
def readiness_probe(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecReadinessProbeArgs']]):
pulumi.set(self, "readiness_probe", value)
@property
@pulumi.getter
def resources(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecResourcesArgs']]:
"""
Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
return pulumi.get(self, "resources")
@resources.setter
def resources(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecResourcesArgs']]):
pulumi.set(self, "resources", value)
@property
@pulumi.getter(name="securityContext")
def security_context(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecSecurityContextArgs']]:
"""
Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
"""
return pulumi.get(self, "security_context")
@security_context.setter
def security_context(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecSecurityContextArgs']]):
pulumi.set(self, "security_context", value)
@property
@pulumi.getter(name="startupProbe")
def startup_probe(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeArgs']]:
"""
StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. This is a beta feature enabled by the StartupProbe feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "startup_probe")
@startup_probe.setter
def startup_probe(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeArgs']]):
pulumi.set(self, "startup_probe", value)
@property
@pulumi.getter
def stdin(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.
"""
return pulumi.get(self, "stdin")
@stdin.setter
def stdin(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "stdin", value)
@property
@pulumi.getter(name="stdinOnce")
def stdin_once(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false.
"""
return pulumi.get(self, "stdin_once")
@stdin_once.setter
def stdin_once(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "stdin_once", value)
@property
@pulumi.getter(name="terminationMessagePath")
def termination_message_path(self) -> Optional[pulumi.Input[str]]:
"""
Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.
"""
return pulumi.get(self, "termination_message_path")
@termination_message_path.setter
def termination_message_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "termination_message_path", value)
@property
@pulumi.getter(name="terminationMessagePolicy")
def termination_message_policy(self) -> Optional[pulumi.Input[str]]:
"""
Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.
"""
return pulumi.get(self, "termination_message_policy")
@termination_message_policy.setter
def termination_message_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "termination_message_policy", value)
@property
@pulumi.getter
def tty(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.
"""
return pulumi.get(self, "tty")
@tty.setter
def tty(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "tty", value)
@property
@pulumi.getter(name="volumeDevices")
def volume_devices(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecVolumeDevicesArgs']]]]:
"""
volumeDevices is the list of block devices to be used by the container.
"""
return pulumi.get(self, "volume_devices")
@volume_devices.setter
def volume_devices(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecVolumeDevicesArgs']]]]):
pulumi.set(self, "volume_devices", value)
@property
@pulumi.getter(name="volumeMounts")
def volume_mounts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecVolumeMountsArgs']]]]:
"""
Pod volumes to mount into the container's filesystem. Cannot be updated.
"""
return pulumi.get(self, "volume_mounts")
@volume_mounts.setter
def volume_mounts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecVolumeMountsArgs']]]]):
pulumi.set(self, "volume_mounts", value)
@property
@pulumi.getter(name="workingDir")
def working_dir(self) -> Optional[pulumi.Input[str]]:
"""
Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.
"""
return pulumi.get(self, "working_dir")
@working_dir.setter
def working_dir(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "working_dir", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: Optional[pulumi.Input[str]] = None,
value_from: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromArgs']] = None):
"""
EnvVar represents an environment variable present in a Container.
:param pulumi.Input[str] name: Name of the environment variable. Must be a C_IDENTIFIER.
:param pulumi.Input[str] value: Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromArgs'] value_from: Source for the environment variable's value. Cannot be used if value is not empty.
"""
pulumi.set(__self__, "name", name)
if value is not None:
pulumi.set(__self__, "value", value)
if value_from is not None:
pulumi.set(__self__, "value_from", value_from)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Name of the environment variable. Must be a C_IDENTIFIER.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
@property
@pulumi.getter(name="valueFrom")
def value_from(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromArgs']]:
"""
Source for the environment variable's value. Cannot be used if value is not empty.
"""
return pulumi.get(self, "value_from")
@value_from.setter
def value_from(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromArgs']]):
pulumi.set(self, "value_from", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvFromArgs:
def __init__(__self__, *,
config_map_ref: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvFromConfigMapRefArgs']] = None,
prefix: Optional[pulumi.Input[str]] = None,
secret_ref: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvFromSecretRefArgs']] = None):
"""
EnvFromSource represents the source of a set of ConfigMaps
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvFromConfigMapRefArgs'] config_map_ref: The ConfigMap to select from
:param pulumi.Input[str] prefix: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvFromSecretRefArgs'] secret_ref: The Secret to select from
"""
if config_map_ref is not None:
pulumi.set(__self__, "config_map_ref", config_map_ref)
if prefix is not None:
pulumi.set(__self__, "prefix", prefix)
if secret_ref is not None:
pulumi.set(__self__, "secret_ref", secret_ref)
@property
@pulumi.getter(name="configMapRef")
def config_map_ref(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvFromConfigMapRefArgs']]:
"""
The ConfigMap to select from
"""
return pulumi.get(self, "config_map_ref")
@config_map_ref.setter
def config_map_ref(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvFromConfigMapRefArgs']]):
pulumi.set(self, "config_map_ref", value)
@property
@pulumi.getter
def prefix(self) -> Optional[pulumi.Input[str]]:
"""
An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
"""
return pulumi.get(self, "prefix")
@prefix.setter
def prefix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "prefix", value)
@property
@pulumi.getter(name="secretRef")
def secret_ref(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvFromSecretRefArgs']]:
"""
The Secret to select from
"""
return pulumi.get(self, "secret_ref")
@secret_ref.setter
def secret_ref(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvFromSecretRefArgs']]):
pulumi.set(self, "secret_ref", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvFromConfigMapRefArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
optional: Optional[pulumi.Input[bool]] = None):
"""
The ConfigMap to select from
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
:param pulumi.Input[bool] optional: Specify whether the ConfigMap must be defined
"""
if name is not None:
pulumi.set(__self__, "name", name)
if optional is not None:
pulumi.set(__self__, "optional", optional)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def optional(self) -> Optional[pulumi.Input[bool]]:
"""
Specify whether the ConfigMap must be defined
"""
return pulumi.get(self, "optional")
@optional.setter
def optional(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "optional", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvFromSecretRefArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
optional: Optional[pulumi.Input[bool]] = None):
"""
The Secret to select from
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
:param pulumi.Input[bool] optional: Specify whether the Secret must be defined
"""
if name is not None:
pulumi.set(__self__, "name", name)
if optional is not None:
pulumi.set(__self__, "optional", optional)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def optional(self) -> Optional[pulumi.Input[bool]]:
"""
Specify whether the Secret must be defined
"""
return pulumi.get(self, "optional")
@optional.setter
def optional(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "optional", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromArgs:
def __init__(__self__, *,
config_map_key_ref: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromConfigMapKeyRefArgs']] = None,
field_ref: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromFieldRefArgs']] = None,
resource_field_ref: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromResourceFieldRefArgs']] = None,
secret_key_ref: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromSecretKeyRefArgs']] = None):
"""
Source for the environment variable's value. Cannot be used if value is not empty.
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromConfigMapKeyRefArgs'] config_map_key_ref: Selects a key of a ConfigMap.
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromFieldRefArgs'] field_ref: Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromResourceFieldRefArgs'] resource_field_ref: Selects a resource of the container: only resource limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromSecretKeyRefArgs'] secret_key_ref: Selects a key of a secret in the pod's namespace
"""
if config_map_key_ref is not None:
pulumi.set(__self__, "config_map_key_ref", config_map_key_ref)
if field_ref is not None:
pulumi.set(__self__, "field_ref", field_ref)
if resource_field_ref is not None:
pulumi.set(__self__, "resource_field_ref", resource_field_ref)
if secret_key_ref is not None:
pulumi.set(__self__, "secret_key_ref", secret_key_ref)
@property
@pulumi.getter(name="configMapKeyRef")
def config_map_key_ref(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromConfigMapKeyRefArgs']]:
"""
Selects a key of a ConfigMap.
"""
return pulumi.get(self, "config_map_key_ref")
@config_map_key_ref.setter
def config_map_key_ref(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromConfigMapKeyRefArgs']]):
pulumi.set(self, "config_map_key_ref", value)
@property
@pulumi.getter(name="fieldRef")
def field_ref(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromFieldRefArgs']]:
"""
Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
"""
return pulumi.get(self, "field_ref")
@field_ref.setter
def field_ref(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromFieldRefArgs']]):
pulumi.set(self, "field_ref", value)
@property
@pulumi.getter(name="resourceFieldRef")
def resource_field_ref(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromResourceFieldRefArgs']]:
"""
Selects a resource of the container: only resource limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
"""
return pulumi.get(self, "resource_field_ref")
@resource_field_ref.setter
def resource_field_ref(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromResourceFieldRefArgs']]):
pulumi.set(self, "resource_field_ref", value)
@property
@pulumi.getter(name="secretKeyRef")
def secret_key_ref(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromSecretKeyRefArgs']]:
"""
Selects a key of a secret in the pod's namespace
"""
return pulumi.get(self, "secret_key_ref")
@secret_key_ref.setter
def secret_key_ref(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromSecretKeyRefArgs']]):
pulumi.set(self, "secret_key_ref", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromConfigMapKeyRefArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
name: Optional[pulumi.Input[str]] = None,
optional: Optional[pulumi.Input[bool]] = None):
"""
Selects a key of a ConfigMap.
:param pulumi.Input[str] key: The key to select.
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
:param pulumi.Input[bool] optional: Specify whether the ConfigMap or its key must be defined
"""
pulumi.set(__self__, "key", key)
if name is not None:
pulumi.set(__self__, "name", name)
if optional is not None:
pulumi.set(__self__, "optional", optional)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The key to select.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def optional(self) -> Optional[pulumi.Input[bool]]:
"""
Specify whether the ConfigMap or its key must be defined
"""
return pulumi.get(self, "optional")
@optional.setter
def optional(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "optional", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromFieldRefArgs:
def __init__(__self__, *,
field_path: pulumi.Input[str],
api_version: Optional[pulumi.Input[str]] = None):
"""
Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
:param pulumi.Input[str] field_path: Path of the field to select in the specified API version.
:param pulumi.Input[str] api_version: Version of the schema the FieldPath is written in terms of, defaults to "v1".
"""
pulumi.set(__self__, "field_path", field_path)
if api_version is not None:
pulumi.set(__self__, "api_version", api_version)
@property
@pulumi.getter(name="fieldPath")
def field_path(self) -> pulumi.Input[str]:
"""
Path of the field to select in the specified API version.
"""
return pulumi.get(self, "field_path")
@field_path.setter
def field_path(self, value: pulumi.Input[str]):
pulumi.set(self, "field_path", value)
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> Optional[pulumi.Input[str]]:
"""
Version of the schema the FieldPath is written in terms of, defaults to "v1".
"""
return pulumi.get(self, "api_version")
@api_version.setter
def api_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_version", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromResourceFieldRefArgs:
def __init__(__self__, *,
resource: pulumi.Input[str],
container_name: Optional[pulumi.Input[str]] = None,
divisor: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromResourceFieldRefDivisorArgs']] = None):
"""
Selects a resource of the container: only resource limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
:param pulumi.Input[str] resource: Required: resource to select
:param pulumi.Input[str] container_name: Container name: required for volumes, optional for env vars
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromResourceFieldRefDivisorArgs'] divisor: Specifies the output format of the exposed resources, defaults to "1"
"""
pulumi.set(__self__, "resource", resource)
if container_name is not None:
pulumi.set(__self__, "container_name", container_name)
if divisor is not None:
pulumi.set(__self__, "divisor", divisor)
@property
@pulumi.getter
def resource(self) -> pulumi.Input[str]:
"""
Required: resource to select
"""
return pulumi.get(self, "resource")
@resource.setter
def resource(self, value: pulumi.Input[str]):
pulumi.set(self, "resource", value)
@property
@pulumi.getter(name="containerName")
def container_name(self) -> Optional[pulumi.Input[str]]:
"""
Container name: required for volumes, optional for env vars
"""
return pulumi.get(self, "container_name")
@container_name.setter
def container_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "container_name", value)
@property
@pulumi.getter
def divisor(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromResourceFieldRefDivisorArgs']]:
"""
Specifies the output format of the exposed resources, defaults to "1"
"""
return pulumi.get(self, "divisor")
@divisor.setter
def divisor(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromResourceFieldRefDivisorArgs']]):
pulumi.set(self, "divisor", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromResourceFieldRefDivisorArgs:
def __init__(__self__):
pass
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecEnvValueFromSecretKeyRefArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
name: Optional[pulumi.Input[str]] = None,
optional: Optional[pulumi.Input[bool]] = None):
"""
Selects a key of a secret in the pod's namespace
:param pulumi.Input[str] key: The key of the secret to select from. Must be a valid secret key.
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
:param pulumi.Input[bool] optional: Specify whether the Secret or its key must be defined
"""
pulumi.set(__self__, "key", key)
if name is not None:
pulumi.set(__self__, "name", name)
if optional is not None:
pulumi.set(__self__, "optional", optional)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The key of the secret to select from. Must be a valid secret key.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def optional(self) -> Optional[pulumi.Input[bool]]:
"""
Specify whether the Secret or its key must be defined
"""
return pulumi.get(self, "optional")
@optional.setter
def optional(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "optional", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecycleArgs:
def __init__(__self__, *,
post_start: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePostStartArgs']] = None,
pre_stop: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePreStopArgs']] = None):
"""
Actions that the management system should take in response to container lifecycle events. Cannot be updated.
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePostStartArgs'] post_start: PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePreStopArgs'] pre_stop: PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
"""
if post_start is not None:
pulumi.set(__self__, "post_start", post_start)
if pre_stop is not None:
pulumi.set(__self__, "pre_stop", pre_stop)
@property
@pulumi.getter(name="postStart")
def post_start(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePostStartArgs']]:
"""
PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
"""
return pulumi.get(self, "post_start")
@post_start.setter
def post_start(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePostStartArgs']]):
pulumi.set(self, "post_start", value)
@property
@pulumi.getter(name="preStop")
def pre_stop(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePreStopArgs']]:
"""
PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
"""
return pulumi.get(self, "pre_stop")
@pre_stop.setter
def pre_stop(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePreStopArgs']]):
pulumi.set(self, "pre_stop", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePostStartArgs:
def __init__(__self__, *,
exec_: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePostStartExecArgs']] = None,
http_get: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePostStartHttpGetArgs']] = None,
tcp_socket: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePostStartTcpSocketArgs']] = None):
"""
PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePostStartExecArgs'] exec_: One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePostStartHttpGetArgs'] http_get: HTTPGet specifies the http request to perform.
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePostStartTcpSocketArgs'] tcp_socket: TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
"""
if exec_ is not None:
pulumi.set(__self__, "exec_", exec_)
if http_get is not None:
pulumi.set(__self__, "http_get", http_get)
if tcp_socket is not None:
pulumi.set(__self__, "tcp_socket", tcp_socket)
@property
@pulumi.getter(name="exec")
def exec_(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePostStartExecArgs']]:
"""
One and only one of the following should be specified. Exec specifies the action to take.
"""
return pulumi.get(self, "exec_")
@exec_.setter
def exec_(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePostStartExecArgs']]):
pulumi.set(self, "exec_", value)
@property
@pulumi.getter(name="httpGet")
def http_get(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePostStartHttpGetArgs']]:
"""
HTTPGet specifies the http request to perform.
"""
return pulumi.get(self, "http_get")
@http_get.setter
def http_get(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePostStartHttpGetArgs']]):
pulumi.set(self, "http_get", value)
@property
@pulumi.getter(name="tcpSocket")
def tcp_socket(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePostStartTcpSocketArgs']]:
"""
TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
"""
return pulumi.get(self, "tcp_socket")
@tcp_socket.setter
def tcp_socket(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePostStartTcpSocketArgs']]):
pulumi.set(self, "tcp_socket", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePostStartExecArgs:
def __init__(__self__, *,
command: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
if command is not None:
pulumi.set(__self__, "command", command)
@property
@pulumi.getter
def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "command", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePostStartHttpGetArgs:
def __init__(__self__, *,
port: pulumi.Input[Union[int, str]],
host: Optional[pulumi.Input[str]] = None,
http_headers: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePostStartHttpGetHttpHeadersArgs']]]] = None,
path: Optional[pulumi.Input[str]] = None,
scheme: Optional[pulumi.Input[str]] = None):
"""
HTTPGet specifies the http request to perform.
:param pulumi.Input[Union[int, str]] port: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePostStartHttpGetHttpHeadersArgs']]] http_headers: Custom headers to set in the request. HTTP allows repeated headers.
:param pulumi.Input[str] path: Path to access on the HTTP server.
:param pulumi.Input[str] scheme: Scheme to use for connecting to the host. Defaults to HTTP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
if http_headers is not None:
pulumi.set(__self__, "http_headers", http_headers)
if path is not None:
pulumi.set(__self__, "path", path)
if scheme is not None:
pulumi.set(__self__, "scheme", scheme)
@property
@pulumi.getter
def port(self) -> pulumi.Input[Union[int, str]]:
"""
Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input[Union[int, str]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@property
@pulumi.getter(name="httpHeaders")
def http_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePostStartHttpGetHttpHeadersArgs']]]]:
"""
Custom headers to set in the request. HTTP allows repeated headers.
"""
return pulumi.get(self, "http_headers")
@http_headers.setter
def http_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePostStartHttpGetHttpHeadersArgs']]]]):
pulumi.set(self, "http_headers", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
Path to access on the HTTP server.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def scheme(self) -> Optional[pulumi.Input[str]]:
"""
Scheme to use for connecting to the host. Defaults to HTTP.
"""
return pulumi.get(self, "scheme")
@scheme.setter
def scheme(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scheme", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePostStartHttpGetHttpHeadersArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
"""
HTTPHeader describes a custom header to be used in HTTP probes
:param pulumi.Input[str] name: The header field name
:param pulumi.Input[str] value: The header field value
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The header field name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The header field value
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePostStartTcpSocketArgs:
def __init__(__self__, *,
port: pulumi.Input[Union[int, str]],
host: Optional[pulumi.Input[str]] = None):
"""
TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
:param pulumi.Input[Union[int, str]] port: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Optional: Host name to connect to, defaults to the pod IP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
@property
@pulumi.getter
def port(self) -> pulumi.Input[Union[int, str]]:
"""
Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input[Union[int, str]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Optional: Host name to connect to, defaults to the pod IP.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePreStopArgs:
def __init__(__self__, *,
exec_: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePreStopExecArgs']] = None,
http_get: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePreStopHttpGetArgs']] = None,
tcp_socket: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePreStopTcpSocketArgs']] = None):
"""
PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePreStopExecArgs'] exec_: One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePreStopHttpGetArgs'] http_get: HTTPGet specifies the http request to perform.
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePreStopTcpSocketArgs'] tcp_socket: TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
"""
if exec_ is not None:
pulumi.set(__self__, "exec_", exec_)
if http_get is not None:
pulumi.set(__self__, "http_get", http_get)
if tcp_socket is not None:
pulumi.set(__self__, "tcp_socket", tcp_socket)
@property
@pulumi.getter(name="exec")
def exec_(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePreStopExecArgs']]:
"""
One and only one of the following should be specified. Exec specifies the action to take.
"""
return pulumi.get(self, "exec_")
@exec_.setter
def exec_(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePreStopExecArgs']]):
pulumi.set(self, "exec_", value)
@property
@pulumi.getter(name="httpGet")
def http_get(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePreStopHttpGetArgs']]:
"""
HTTPGet specifies the http request to perform.
"""
return pulumi.get(self, "http_get")
@http_get.setter
def http_get(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePreStopHttpGetArgs']]):
pulumi.set(self, "http_get", value)
@property
@pulumi.getter(name="tcpSocket")
def tcp_socket(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePreStopTcpSocketArgs']]:
"""
TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
"""
return pulumi.get(self, "tcp_socket")
@tcp_socket.setter
def tcp_socket(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePreStopTcpSocketArgs']]):
pulumi.set(self, "tcp_socket", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePreStopExecArgs:
def __init__(__self__, *,
command: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
if command is not None:
pulumi.set(__self__, "command", command)
@property
@pulumi.getter
def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "command", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePreStopHttpGetArgs:
def __init__(__self__, *,
port: pulumi.Input[Union[int, str]],
host: Optional[pulumi.Input[str]] = None,
http_headers: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePreStopHttpGetHttpHeadersArgs']]]] = None,
path: Optional[pulumi.Input[str]] = None,
scheme: Optional[pulumi.Input[str]] = None):
"""
HTTPGet specifies the http request to perform.
:param pulumi.Input[Union[int, str]] port: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePreStopHttpGetHttpHeadersArgs']]] http_headers: Custom headers to set in the request. HTTP allows repeated headers.
:param pulumi.Input[str] path: Path to access on the HTTP server.
:param pulumi.Input[str] scheme: Scheme to use for connecting to the host. Defaults to HTTP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
if http_headers is not None:
pulumi.set(__self__, "http_headers", http_headers)
if path is not None:
pulumi.set(__self__, "path", path)
if scheme is not None:
pulumi.set(__self__, "scheme", scheme)
@property
@pulumi.getter
def port(self) -> pulumi.Input[Union[int, str]]:
"""
Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input[Union[int, str]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@property
@pulumi.getter(name="httpHeaders")
def http_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePreStopHttpGetHttpHeadersArgs']]]]:
"""
Custom headers to set in the request. HTTP allows repeated headers.
"""
return pulumi.get(self, "http_headers")
@http_headers.setter
def http_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePreStopHttpGetHttpHeadersArgs']]]]):
pulumi.set(self, "http_headers", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
Path to access on the HTTP server.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def scheme(self) -> Optional[pulumi.Input[str]]:
"""
Scheme to use for connecting to the host. Defaults to HTTP.
"""
return pulumi.get(self, "scheme")
@scheme.setter
def scheme(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scheme", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePreStopHttpGetHttpHeadersArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
"""
HTTPHeader describes a custom header to be used in HTTP probes
:param pulumi.Input[str] name: The header field name
:param pulumi.Input[str] value: The header field value
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The header field name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The header field value
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecLifecyclePreStopTcpSocketArgs:
def __init__(__self__, *,
port: pulumi.Input[Union[int, str]],
host: Optional[pulumi.Input[str]] = None):
"""
TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
:param pulumi.Input[Union[int, str]] port: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Optional: Host name to connect to, defaults to the pod IP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
@property
@pulumi.getter
def port(self) -> pulumi.Input[Union[int, str]]:
"""
Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input[Union[int, str]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Optional: Host name to connect to, defaults to the pod IP.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecLivenessProbeArgs:
def __init__(__self__, *,
exec_: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLivenessProbeExecArgs']] = None,
failure_threshold: Optional[pulumi.Input[int]] = None,
http_get: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLivenessProbeHttpGetArgs']] = None,
initial_delay_seconds: Optional[pulumi.Input[int]] = None,
period_seconds: Optional[pulumi.Input[int]] = None,
success_threshold: Optional[pulumi.Input[int]] = None,
tcp_socket: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLivenessProbeTcpSocketArgs']] = None,
timeout_seconds: Optional[pulumi.Input[int]] = None):
"""
Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLivenessProbeExecArgs'] exec_: One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[int] failure_threshold: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLivenessProbeHttpGetArgs'] http_get: HTTPGet specifies the http request to perform.
:param pulumi.Input[int] initial_delay_seconds: Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input[int] period_seconds: How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
:param pulumi.Input[int] success_threshold: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLivenessProbeTcpSocketArgs'] tcp_socket: TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
:param pulumi.Input[int] timeout_seconds: Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
if exec_ is not None:
pulumi.set(__self__, "exec_", exec_)
if failure_threshold is not None:
pulumi.set(__self__, "failure_threshold", failure_threshold)
if http_get is not None:
pulumi.set(__self__, "http_get", http_get)
if initial_delay_seconds is not None:
pulumi.set(__self__, "initial_delay_seconds", initial_delay_seconds)
if period_seconds is not None:
pulumi.set(__self__, "period_seconds", period_seconds)
if success_threshold is not None:
pulumi.set(__self__, "success_threshold", success_threshold)
if tcp_socket is not None:
pulumi.set(__self__, "tcp_socket", tcp_socket)
if timeout_seconds is not None:
pulumi.set(__self__, "timeout_seconds", timeout_seconds)
@property
@pulumi.getter(name="exec")
def exec_(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLivenessProbeExecArgs']]:
"""
One and only one of the following should be specified. Exec specifies the action to take.
"""
return pulumi.get(self, "exec_")
@exec_.setter
def exec_(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLivenessProbeExecArgs']]):
pulumi.set(self, "exec_", value)
@property
@pulumi.getter(name="failureThreshold")
def failure_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
"""
return pulumi.get(self, "failure_threshold")
@failure_threshold.setter
def failure_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "failure_threshold", value)
@property
@pulumi.getter(name="httpGet")
def http_get(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLivenessProbeHttpGetArgs']]:
"""
HTTPGet specifies the http request to perform.
"""
return pulumi.get(self, "http_get")
@http_get.setter
def http_get(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLivenessProbeHttpGetArgs']]):
pulumi.set(self, "http_get", value)
@property
@pulumi.getter(name="initialDelaySeconds")
def initial_delay_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "initial_delay_seconds")
@initial_delay_seconds.setter
def initial_delay_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "initial_delay_seconds", value)
@property
@pulumi.getter(name="periodSeconds")
def period_seconds(self) -> Optional[pulumi.Input[int]]:
"""
How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
"""
return pulumi.get(self, "period_seconds")
@period_seconds.setter
def period_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "period_seconds", value)
@property
@pulumi.getter(name="successThreshold")
def success_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
"""
return pulumi.get(self, "success_threshold")
@success_threshold.setter
def success_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "success_threshold", value)
@property
@pulumi.getter(name="tcpSocket")
def tcp_socket(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLivenessProbeTcpSocketArgs']]:
"""
TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
"""
return pulumi.get(self, "tcp_socket")
@tcp_socket.setter
def tcp_socket(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLivenessProbeTcpSocketArgs']]):
pulumi.set(self, "tcp_socket", value)
@property
@pulumi.getter(name="timeoutSeconds")
def timeout_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "timeout_seconds")
@timeout_seconds.setter
def timeout_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "timeout_seconds", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecLivenessProbeExecArgs:
def __init__(__self__, *,
command: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
if command is not None:
pulumi.set(__self__, "command", command)
@property
@pulumi.getter
def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "command", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecLivenessProbeHttpGetArgs:
def __init__(__self__, *,
port: pulumi.Input[Union[int, str]],
host: Optional[pulumi.Input[str]] = None,
http_headers: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLivenessProbeHttpGetHttpHeadersArgs']]]] = None,
path: Optional[pulumi.Input[str]] = None,
scheme: Optional[pulumi.Input[str]] = None):
"""
HTTPGet specifies the http request to perform.
:param pulumi.Input[Union[int, str]] port: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLivenessProbeHttpGetHttpHeadersArgs']]] http_headers: Custom headers to set in the request. HTTP allows repeated headers.
:param pulumi.Input[str] path: Path to access on the HTTP server.
:param pulumi.Input[str] scheme: Scheme to use for connecting to the host. Defaults to HTTP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
if http_headers is not None:
pulumi.set(__self__, "http_headers", http_headers)
if path is not None:
pulumi.set(__self__, "path", path)
if scheme is not None:
pulumi.set(__self__, "scheme", scheme)
@property
@pulumi.getter
def port(self) -> pulumi.Input[Union[int, str]]:
"""
Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input[Union[int, str]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@property
@pulumi.getter(name="httpHeaders")
def http_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLivenessProbeHttpGetHttpHeadersArgs']]]]:
"""
Custom headers to set in the request. HTTP allows repeated headers.
"""
return pulumi.get(self, "http_headers")
@http_headers.setter
def http_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecLivenessProbeHttpGetHttpHeadersArgs']]]]):
pulumi.set(self, "http_headers", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
Path to access on the HTTP server.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def scheme(self) -> Optional[pulumi.Input[str]]:
"""
Scheme to use for connecting to the host. Defaults to HTTP.
"""
return pulumi.get(self, "scheme")
@scheme.setter
def scheme(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scheme", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecLivenessProbeHttpGetHttpHeadersArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
"""
HTTPHeader describes a custom header to be used in HTTP probes
:param pulumi.Input[str] name: The header field name
:param pulumi.Input[str] value: The header field value
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The header field name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The header field value
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecLivenessProbeTcpSocketArgs:
def __init__(__self__, *,
port: pulumi.Input[Union[int, str]],
host: Optional[pulumi.Input[str]] = None):
"""
TCPSocket specifies an action involving a TCP port. TCP hooks are not yet supported (TODO: implement a realistic TCP lifecycle hook).
:param pulumi.Input[Union[int, str]] port: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Optional: Host name to connect to; defaults to the pod IP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
@property
@pulumi.getter
def port(self) -> pulumi.Input[Union[int, str]]:
"""
Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input[Union[int, str]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Optional: Host name to connect to; defaults to the pod IP.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecPortsArgs:
def __init__(__self__, *,
container_port: pulumi.Input[int],
protocol: pulumi.Input[str],
host_ip: Optional[pulumi.Input[str]] = None,
host_port: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None):
"""
ContainerPort represents a network port in a single container.
:param pulumi.Input[int] container_port: Port number to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.
:param pulumi.Input[str] protocol: Protocol for the port. Must be UDP, TCP, or SCTP. Defaults to "TCP".
:param pulumi.Input[str] host_ip: The host IP to bind the external port to.
:param pulumi.Input[int] host_port: Port number to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.
:param pulumi.Input[str] name: If specified, this must be an IANA_SVC_NAME and unique within the pod; each named port in a pod must have a unique name. This is the name for the port that can be referred to by services.
"""
pulumi.set(__self__, "container_port", container_port)
pulumi.set(__self__, "protocol", protocol)
if host_ip is not None:
pulumi.set(__self__, "host_ip", host_ip)
if host_port is not None:
pulumi.set(__self__, "host_port", host_port)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="containerPort")
def container_port(self) -> pulumi.Input[int]:
"""
Port number to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.
"""
return pulumi.get(self, "container_port")
@container_port.setter
def container_port(self, value: pulumi.Input[int]):
pulumi.set(self, "container_port", value)
@property
@pulumi.getter
def protocol(self) -> pulumi.Input[str]:
"""
Protocol for the port. Must be UDP, TCP, or SCTP. Defaults to "TCP".
"""
return pulumi.get(self, "protocol")
@protocol.setter
def protocol(self, value: pulumi.Input[str]):
pulumi.set(self, "protocol", value)
@property
@pulumi.getter(name="hostIP")
def host_ip(self) -> Optional[pulumi.Input[str]]:
"""
The host IP to bind the external port to.
"""
return pulumi.get(self, "host_ip")
@host_ip.setter
def host_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host_ip", value)
@property
@pulumi.getter(name="hostPort")
def host_port(self) -> Optional[pulumi.Input[int]]:
"""
Port number to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.
"""
return pulumi.get(self, "host_port")
@host_port.setter
def host_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "host_port", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
If specified, this must be an IANA_SVC_NAME and unique within the pod; each named port in a pod must have a unique name. This is the name for the port that can be referred to by services.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecReadinessProbeArgs:
def __init__(__self__, *,
exec_: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecReadinessProbeExecArgs']] = None,
failure_threshold: Optional[pulumi.Input[int]] = None,
http_get: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecReadinessProbeHttpGetArgs']] = None,
initial_delay_seconds: Optional[pulumi.Input[int]] = None,
period_seconds: Optional[pulumi.Input[int]] = None,
success_threshold: Optional[pulumi.Input[int]] = None,
tcp_socket: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecReadinessProbeTcpSocketArgs']] = None,
timeout_seconds: Optional[pulumi.Input[int]] = None):
"""
Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecReadinessProbeExecArgs'] exec_: One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[int] failure_threshold: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecReadinessProbeHttpGetArgs'] http_get: HTTPGet specifies the http request to perform.
:param pulumi.Input[int] initial_delay_seconds: Number of seconds after the container has started before the probe is initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input[int] period_seconds: How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
:param pulumi.Input[int] success_threshold: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecReadinessProbeTcpSocketArgs'] tcp_socket: TCPSocket specifies an action involving a TCP port. TCP hooks are not yet supported (TODO: implement a realistic TCP lifecycle hook).
:param pulumi.Input[int] timeout_seconds: Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
if exec_ is not None:
pulumi.set(__self__, "exec_", exec_)
if failure_threshold is not None:
pulumi.set(__self__, "failure_threshold", failure_threshold)
if http_get is not None:
pulumi.set(__self__, "http_get", http_get)
if initial_delay_seconds is not None:
pulumi.set(__self__, "initial_delay_seconds", initial_delay_seconds)
if period_seconds is not None:
pulumi.set(__self__, "period_seconds", period_seconds)
if success_threshold is not None:
pulumi.set(__self__, "success_threshold", success_threshold)
if tcp_socket is not None:
pulumi.set(__self__, "tcp_socket", tcp_socket)
if timeout_seconds is not None:
pulumi.set(__self__, "timeout_seconds", timeout_seconds)
@property
@pulumi.getter(name="exec")
def exec_(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecReadinessProbeExecArgs']]:
"""
One and only one of the following should be specified. Exec specifies the action to take.
"""
return pulumi.get(self, "exec_")
@exec_.setter
def exec_(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecReadinessProbeExecArgs']]):
pulumi.set(self, "exec_", value)
@property
@pulumi.getter(name="failureThreshold")
def failure_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
"""
return pulumi.get(self, "failure_threshold")
@failure_threshold.setter
def failure_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "failure_threshold", value)
@property
@pulumi.getter(name="httpGet")
def http_get(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecReadinessProbeHttpGetArgs']]:
"""
HTTPGet specifies the http request to perform.
"""
return pulumi.get(self, "http_get")
@http_get.setter
def http_get(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecReadinessProbeHttpGetArgs']]):
pulumi.set(self, "http_get", value)
@property
@pulumi.getter(name="initialDelaySeconds")
def initial_delay_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Number of seconds after the container has started before the probe is initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "initial_delay_seconds")
@initial_delay_seconds.setter
def initial_delay_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "initial_delay_seconds", value)
@property
@pulumi.getter(name="periodSeconds")
def period_seconds(self) -> Optional[pulumi.Input[int]]:
"""
How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
"""
return pulumi.get(self, "period_seconds")
@period_seconds.setter
def period_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "period_seconds", value)
@property
@pulumi.getter(name="successThreshold")
def success_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
"""
return pulumi.get(self, "success_threshold")
@success_threshold.setter
def success_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "success_threshold", value)
@property
@pulumi.getter(name="tcpSocket")
def tcp_socket(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecReadinessProbeTcpSocketArgs']]:
"""
TCPSocket specifies an action involving a TCP port. TCP hooks are not yet supported (TODO: implement a realistic TCP lifecycle hook).
"""
return pulumi.get(self, "tcp_socket")
@tcp_socket.setter
def tcp_socket(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecReadinessProbeTcpSocketArgs']]):
pulumi.set(self, "tcp_socket", value)
@property
@pulumi.getter(name="timeoutSeconds")
def timeout_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "timeout_seconds")
@timeout_seconds.setter
def timeout_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "timeout_seconds", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecReadinessProbeExecArgs:
def __init__(__self__, *,
command: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Command is the command line to execute inside the container; the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd; it is not run inside a shell, so traditional shell instructions ('|', etc.) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
if command is not None:
pulumi.set(__self__, "command", command)
@property
@pulumi.getter
def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Command is the command line to execute inside the container; the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd; it is not run inside a shell, so traditional shell instructions ('|', etc.) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "command", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecReadinessProbeHttpGetArgs:
def __init__(__self__, *,
port: pulumi.Input[Union[int, str]],
host: Optional[pulumi.Input[str]] = None,
http_headers: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecReadinessProbeHttpGetHttpHeadersArgs']]]] = None,
path: Optional[pulumi.Input[str]] = None,
scheme: Optional[pulumi.Input[str]] = None):
"""
HTTPGet specifies the http request to perform.
:param pulumi.Input[Union[int, str]] port: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Host name to connect to; defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecReadinessProbeHttpGetHttpHeadersArgs']]] http_headers: Custom headers to set in the request. HTTP allows repeated headers.
:param pulumi.Input[str] path: Path to access on the HTTP server.
:param pulumi.Input[str] scheme: Scheme to use for connecting to the host. Defaults to HTTP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
if http_headers is not None:
pulumi.set(__self__, "http_headers", http_headers)
if path is not None:
pulumi.set(__self__, "path", path)
if scheme is not None:
pulumi.set(__self__, "scheme", scheme)
@property
@pulumi.getter
def port(self) -> pulumi.Input[Union[int, str]]:
"""
Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input[Union[int, str]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Host name to connect to; defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@property
@pulumi.getter(name="httpHeaders")
def http_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecReadinessProbeHttpGetHttpHeadersArgs']]]]:
"""
Custom headers to set in the request. HTTP allows repeated headers.
"""
return pulumi.get(self, "http_headers")
@http_headers.setter
def http_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecReadinessProbeHttpGetHttpHeadersArgs']]]]):
pulumi.set(self, "http_headers", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
Path to access on the HTTP server.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def scheme(self) -> Optional[pulumi.Input[str]]:
"""
Scheme to use for connecting to the host. Defaults to HTTP.
"""
return pulumi.get(self, "scheme")
@scheme.setter
def scheme(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scheme", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecReadinessProbeHttpGetHttpHeadersArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
"""
HTTPHeader describes a custom header to be used in HTTP probes.
:param pulumi.Input[str] name: The header field name
:param pulumi.Input[str] value: The header field value
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The header field name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The header field value
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecReadinessProbeTcpSocketArgs:
def __init__(__self__, *,
port: pulumi.Input[Union[int, str]],
host: Optional[pulumi.Input[str]] = None):
"""
TCPSocket specifies an action involving a TCP port. TCP hooks are not yet supported (TODO: implement a realistic TCP lifecycle hook).
:param pulumi.Input[Union[int, str]] port: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Optional: Host name to connect to; defaults to the pod IP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
@property
@pulumi.getter
def port(self) -> pulumi.Input[Union[int, str]]:
"""
Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input[Union[int, str]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Optional: Host name to connect to; defaults to the pod IP.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecResourcesArgs:
def __init__(__self__, *,
limits: Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecResourcesLimitsArgs']]]] = None,
requests: Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecResourcesRequestsArgs']]]] = None):
"""
Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
:param pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecResourcesLimitsArgs']]] limits: Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
:param pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecResourcesRequestsArgs']]] requests: Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
if limits is not None:
pulumi.set(__self__, "limits", limits)
if requests is not None:
pulumi.set(__self__, "requests", requests)
@property
@pulumi.getter
def limits(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecResourcesLimitsArgs']]]]:
"""
Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
return pulumi.get(self, "limits")
@limits.setter
def limits(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecResourcesLimitsArgs']]]]):
pulumi.set(self, "limits", value)
@property
@pulumi.getter
def requests(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecResourcesRequestsArgs']]]]:
"""
Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
return pulumi.get(self, "requests")
@requests.setter
def requests(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecResourcesRequestsArgs']]]]):
pulumi.set(self, "requests", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecResourcesLimitsArgs:
def __init__(__self__):
# Empty input type: the CRD schema leaves this resource-quantity map untyped, so no properties are generated.
pass
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecResourcesRequestsArgs:
def __init__(__self__):
# Empty input type: the CRD schema leaves this resource-quantity map untyped, so no properties are generated.
pass
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecSecurityContextArgs:
def __init__(__self__, *,
allow_privilege_escalation: Optional[pulumi.Input[bool]] = None,
capabilities: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecSecurityContextCapabilitiesArgs']] = None,
privileged: Optional[pulumi.Input[bool]] = None,
proc_mount: Optional[pulumi.Input[str]] = None,
read_only_root_filesystem: Optional[pulumi.Input[bool]] = None,
run_as_group: Optional[pulumi.Input[int]] = None,
run_as_non_root: Optional[pulumi.Input[bool]] = None,
run_as_user: Optional[pulumi.Input[int]] = None,
se_linux_options: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecSecurityContextSeLinuxOptionsArgs']] = None,
windows_options: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecSecurityContextWindowsOptionsArgs']] = None):
"""
Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
:param pulumi.Input[bool] allow_privilege_escalation: AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls whether the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is always true when the container (1) is run as privileged or (2) has CAP_SYS_ADMIN.
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecSecurityContextCapabilitiesArgs'] capabilities: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.
:param pulumi.Input[bool] privileged: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.
:param pulumi.Input[str] proc_mount: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled.
:param pulumi.Input[bool] read_only_root_filesystem: Whether this container has a read-only root filesystem. Default is false.
:param pulumi.Input[int] run_as_group: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param pulumi.Input[bool] run_as_non_root: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param pulumi.Input[int] run_as_user: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecSecurityContextSeLinuxOptionsArgs'] se_linux_options: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecSecurityContextWindowsOptionsArgs'] windows_options: The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
if allow_privilege_escalation is not None:
pulumi.set(__self__, "allow_privilege_escalation", allow_privilege_escalation)
if capabilities is not None:
pulumi.set(__self__, "capabilities", capabilities)
if privileged is not None:
pulumi.set(__self__, "privileged", privileged)
if proc_mount is not None:
pulumi.set(__self__, "proc_mount", proc_mount)
if read_only_root_filesystem is not None:
pulumi.set(__self__, "read_only_root_filesystem", read_only_root_filesystem)
if run_as_group is not None:
pulumi.set(__self__, "run_as_group", run_as_group)
if run_as_non_root is not None:
pulumi.set(__self__, "run_as_non_root", run_as_non_root)
if run_as_user is not None:
pulumi.set(__self__, "run_as_user", run_as_user)
if se_linux_options is not None:
pulumi.set(__self__, "se_linux_options", se_linux_options)
if windows_options is not None:
pulumi.set(__self__, "windows_options", windows_options)
@property
@pulumi.getter(name="allowPrivilegeEscalation")
def allow_privilege_escalation(self) -> Optional[pulumi.Input[bool]]:
"""
AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls whether the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is always true when the container (1) is run as privileged or (2) has CAP_SYS_ADMIN.
"""
return pulumi.get(self, "allow_privilege_escalation")
@allow_privilege_escalation.setter
def allow_privilege_escalation(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "allow_privilege_escalation", value)
@property
@pulumi.getter
def capabilities(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecSecurityContextCapabilitiesArgs']]:
"""
The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.
"""
return pulumi.get(self, "capabilities")
@capabilities.setter
def capabilities(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecSecurityContextCapabilitiesArgs']]):
pulumi.set(self, "capabilities", value)
@property
@pulumi.getter
def privileged(self) -> Optional[pulumi.Input[bool]]:
"""
Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.
"""
return pulumi.get(self, "privileged")
@privileged.setter
def privileged(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "privileged", value)
@property
@pulumi.getter(name="procMount")
def proc_mount(self) -> Optional[pulumi.Input[str]]:
"""
procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled.
"""
return pulumi.get(self, "proc_mount")
@proc_mount.setter
def proc_mount(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "proc_mount", value)
@property
@pulumi.getter(name="readOnlyRootFilesystem")
def read_only_root_filesystem(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this container has a read-only root filesystem. Default is false.
"""
return pulumi.get(self, "read_only_root_filesystem")
@read_only_root_filesystem.setter
def read_only_root_filesystem(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "read_only_root_filesystem", value)
@property
@pulumi.getter(name="runAsGroup")
def run_as_group(self) -> Optional[pulumi.Input[int]]:
"""
The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "run_as_group")
@run_as_group.setter
def run_as_group(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "run_as_group", value)
@property
@pulumi.getter(name="runAsNonRoot")
def run_as_non_root(self) -> Optional[pulumi.Input[bool]]:
"""
Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "run_as_non_root")
@run_as_non_root.setter
def run_as_non_root(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "run_as_non_root", value)
@property
@pulumi.getter(name="runAsUser")
def run_as_user(self) -> Optional[pulumi.Input[int]]:
"""
The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "run_as_user")
@run_as_user.setter
def run_as_user(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "run_as_user", value)
@property
@pulumi.getter(name="seLinuxOptions")
def se_linux_options(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecSecurityContextSeLinuxOptionsArgs']]:
"""
The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "se_linux_options")
@se_linux_options.setter
def se_linux_options(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecSecurityContextSeLinuxOptionsArgs']]):
pulumi.set(self, "se_linux_options", value)
@property
@pulumi.getter(name="windowsOptions")
def windows_options(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecSecurityContextWindowsOptionsArgs']]:
"""
The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "windows_options")
@windows_options.setter
def windows_options(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecSecurityContextWindowsOptionsArgs']]):
pulumi.set(self, "windows_options", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecSecurityContextCapabilitiesArgs:
def __init__(__self__, *,
add: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
drop: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.
:param pulumi.Input[Sequence[pulumi.Input[str]]] add: Added capabilities
:param pulumi.Input[Sequence[pulumi.Input[str]]] drop: Removed capabilities
"""
if add is not None:
pulumi.set(__self__, "add", add)
if drop is not None:
pulumi.set(__self__, "drop", drop)
@property
@pulumi.getter
def add(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Added capabilities
"""
return pulumi.get(self, "add")
@add.setter
def add(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "add", value)
@property
@pulumi.getter
def drop(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Removed capabilities
"""
return pulumi.get(self, "drop")
@drop.setter
def drop(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "drop", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecSecurityContextSeLinuxOptionsArgs:
def __init__(__self__, *,
level: Optional[pulumi.Input[str]] = None,
role: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
user: Optional[pulumi.Input[str]] = None):
"""
The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param pulumi.Input[str] level: Level is the SELinux level label that applies to the container.
:param pulumi.Input[str] role: Role is a SELinux role label that applies to the container.
:param pulumi.Input[str] type: Type is a SELinux type label that applies to the container.
:param pulumi.Input[str] user: User is a SELinux user label that applies to the container.
"""
if level is not None:
pulumi.set(__self__, "level", level)
if role is not None:
pulumi.set(__self__, "role", role)
if type is not None:
pulumi.set(__self__, "type", type)
if user is not None:
pulumi.set(__self__, "user", user)
@property
@pulumi.getter
def level(self) -> Optional[pulumi.Input[str]]:
"""
Level is the SELinux level label that applies to the container.
"""
return pulumi.get(self, "level")
@level.setter
def level(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "level", value)
@property
@pulumi.getter
def role(self) -> Optional[pulumi.Input[str]]:
"""
Role is a SELinux role label that applies to the container.
"""
return pulumi.get(self, "role")
@role.setter
def role(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
Type is a SELinux type label that applies to the container.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def user(self) -> Optional[pulumi.Input[str]]:
"""
User is a SELinux user label that applies to the container.
"""
return pulumi.get(self, "user")
@user.setter
def user(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecSecurityContextWindowsOptionsArgs:
def __init__(__self__, *,
gmsa_credential_spec: Optional[pulumi.Input[str]] = None,
gmsa_credential_spec_name: Optional[pulumi.Input[str]] = None,
run_as_user_name: Optional[pulumi.Input[str]] = None):
"""
The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param pulumi.Input[str] gmsa_credential_spec: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.
:param pulumi.Input[str] gmsa_credential_spec_name: GMSACredentialSpecName is the name of the GMSA credential spec to use.
:param pulumi.Input[str] run_as_user_name: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
if gmsa_credential_spec is not None:
pulumi.set(__self__, "gmsa_credential_spec", gmsa_credential_spec)
if gmsa_credential_spec_name is not None:
pulumi.set(__self__, "gmsa_credential_spec_name", gmsa_credential_spec_name)
if run_as_user_name is not None:
pulumi.set(__self__, "run_as_user_name", run_as_user_name)
@property
@pulumi.getter(name="gmsaCredentialSpec")
def gmsa_credential_spec(self) -> Optional[pulumi.Input[str]]:
"""
GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.
"""
return pulumi.get(self, "gmsa_credential_spec")
@gmsa_credential_spec.setter
def gmsa_credential_spec(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "gmsa_credential_spec", value)
@property
@pulumi.getter(name="gmsaCredentialSpecName")
def gmsa_credential_spec_name(self) -> Optional[pulumi.Input[str]]:
"""
GMSACredentialSpecName is the name of the GMSA credential spec to use.
"""
return pulumi.get(self, "gmsa_credential_spec_name")
@gmsa_credential_spec_name.setter
def gmsa_credential_spec_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "gmsa_credential_spec_name", value)
@property
@pulumi.getter(name="runAsUserName")
def run_as_user_name(self) -> Optional[pulumi.Input[str]]:
"""
The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "run_as_user_name")
@run_as_user_name.setter
def run_as_user_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "run_as_user_name", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeArgs:
def __init__(__self__, *,
exec_: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeExecArgs']] = None,
failure_threshold: Optional[pulumi.Input[int]] = None,
http_get: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeHttpGetArgs']] = None,
initial_delay_seconds: Optional[pulumi.Input[int]] = None,
period_seconds: Optional[pulumi.Input[int]] = None,
success_threshold: Optional[pulumi.Input[int]] = None,
tcp_socket: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeTcpSocketArgs']] = None,
timeout_seconds: Optional[pulumi.Input[int]] = None):
"""
StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. This is a beta feature enabled by the StartupProbe feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeExecArgs'] exec_: One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[int] failure_threshold: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeHttpGetArgs'] http_get: HTTPGet specifies the http request to perform.
:param pulumi.Input[int] initial_delay_seconds: Number of seconds after the container has started before the probe is initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input[int] period_seconds: How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
:param pulumi.Input[int] success_threshold: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeTcpSocketArgs'] tcp_socket: TCPSocket specifies an action involving a TCP port. TCP hooks are not yet supported (TODO: implement a realistic TCP lifecycle hook).
:param pulumi.Input[int] timeout_seconds: Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
if exec_ is not None:
pulumi.set(__self__, "exec_", exec_)
if failure_threshold is not None:
pulumi.set(__self__, "failure_threshold", failure_threshold)
if http_get is not None:
pulumi.set(__self__, "http_get", http_get)
if initial_delay_seconds is not None:
pulumi.set(__self__, "initial_delay_seconds", initial_delay_seconds)
if period_seconds is not None:
pulumi.set(__self__, "period_seconds", period_seconds)
if success_threshold is not None:
pulumi.set(__self__, "success_threshold", success_threshold)
if tcp_socket is not None:
pulumi.set(__self__, "tcp_socket", tcp_socket)
if timeout_seconds is not None:
pulumi.set(__self__, "timeout_seconds", timeout_seconds)
@property
@pulumi.getter(name="exec")
def exec_(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeExecArgs']]:
"""
One and only one of the following should be specified. Exec specifies the action to take.
"""
return pulumi.get(self, "exec_")
@exec_.setter
def exec_(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeExecArgs']]):
pulumi.set(self, "exec_", value)
@property
@pulumi.getter(name="failureThreshold")
def failure_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
"""
return pulumi.get(self, "failure_threshold")
@failure_threshold.setter
def failure_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "failure_threshold", value)
@property
@pulumi.getter(name="httpGet")
def http_get(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeHttpGetArgs']]:
"""
HTTPGet specifies the http request to perform.
"""
return pulumi.get(self, "http_get")
@http_get.setter
def http_get(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeHttpGetArgs']]):
pulumi.set(self, "http_get", value)
@property
@pulumi.getter(name="initialDelaySeconds")
def initial_delay_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Number of seconds after the container has started before the probe is initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "initial_delay_seconds")
@initial_delay_seconds.setter
def initial_delay_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "initial_delay_seconds", value)
@property
@pulumi.getter(name="periodSeconds")
def period_seconds(self) -> Optional[pulumi.Input[int]]:
"""
How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1.
"""
return pulumi.get(self, "period_seconds")
@period_seconds.setter
def period_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "period_seconds", value)
@property
@pulumi.getter(name="successThreshold")
def success_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
"""
return pulumi.get(self, "success_threshold")
@success_threshold.setter
def success_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "success_threshold", value)
@property
@pulumi.getter(name="tcpSocket")
def tcp_socket(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeTcpSocketArgs']]:
"""
TCPSocket specifies an action involving a TCP port. TCP hooks are not yet supported (TODO: implement a realistic TCP lifecycle hook).
"""
return pulumi.get(self, "tcp_socket")
@tcp_socket.setter
def tcp_socket(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeTcpSocketArgs']]):
pulumi.set(self, "tcp_socket", value)
@property
@pulumi.getter(name="timeoutSeconds")
def timeout_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "timeout_seconds")
@timeout_seconds.setter
def timeout_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "timeout_seconds", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeExecArgs:
def __init__(__self__, *,
command: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Command is the command line to execute inside the container; the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd; it is not run inside a shell, so traditional shell instructions ('|', etc.) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
if command is not None:
pulumi.set(__self__, "command", command)
@property
@pulumi.getter
def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Command is the command line to execute inside the container; the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd; it is not run inside a shell, so traditional shell instructions ('|', etc.) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "command", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeHttpGetArgs:
def __init__(__self__, *,
port: pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeHttpGetPortArgs'],
host: Optional[pulumi.Input[str]] = None,
http_headers: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeHttpGetHttpHeadersArgs']]]] = None,
path: Optional[pulumi.Input[str]] = None,
scheme: Optional[pulumi.Input[str]] = None):
"""
HTTPGet specifies the http request to perform.
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeHttpGetPortArgs'] port: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Host name to connect to; defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
:param pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeHttpGetHttpHeadersArgs']]] http_headers: Custom headers to set in the request. HTTP allows repeated headers.
:param pulumi.Input[str] path: Path to access on the HTTP server.
:param pulumi.Input[str] scheme: Scheme to use for connecting to the host. Defaults to HTTP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
if http_headers is not None:
pulumi.set(__self__, "http_headers", http_headers)
if path is not None:
pulumi.set(__self__, "path", path)
if scheme is not None:
pulumi.set(__self__, "scheme", scheme)
@property
@pulumi.getter
def port(self) -> pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeHttpGetPortArgs']:
"""
Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeHttpGetPortArgs']):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Host name to connect to; defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@property
@pulumi.getter(name="httpHeaders")
def http_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeHttpGetHttpHeadersArgs']]]]:
"""
Custom headers to set in the request. HTTP allows repeated headers.
"""
return pulumi.get(self, "http_headers")
@http_headers.setter
def http_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeHttpGetHttpHeadersArgs']]]]):
pulumi.set(self, "http_headers", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
Path to access on the HTTP server.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def scheme(self) -> Optional[pulumi.Input[str]]:
"""
Scheme to use for connecting to the host. Defaults to HTTP.
"""
return pulumi.get(self, "scheme")
@scheme.setter
def scheme(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scheme", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeHttpGetHttpHeadersArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
"""
HTTPHeader describes a custom header to be used in HTTP probes.
:param pulumi.Input[str] name: The header field name
:param pulumi.Input[str] value: The header field value
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The header field name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The header field value
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeHttpGetPortArgs:
def __init__(__self__):
# Empty placeholder type for the startup-probe port; the analogous liveness/readiness probes type this port as Union[int, str].
pass
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeTcpSocketArgs:
def __init__(__self__, *,
port: pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeTcpSocketPortArgs'],
host: Optional[pulumi.Input[str]] = None):
"""
TCPSocket specifies an action involving a TCP port. TCP hooks are not yet supported (TODO: implement a realistic TCP lifecycle hook).
:param pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeTcpSocketPortArgs'] port: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Optional: Host name to connect to; defaults to the pod IP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
@property
@pulumi.getter
def port(self) -> pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeTcpSocketPortArgs']:
"""
Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input['SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeTcpSocketPortArgs']):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Optional: Host name to connect to; defaults to the pod IP.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecStartupProbeTcpSocketPortArgs:
def __init__(__self__):
# Empty placeholder type for the startup-probe port; the analogous liveness/readiness probes type this port as Union[int, str].
pass
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecVolumeDevicesArgs:
def __init__(__self__, *,
device_path: pulumi.Input[str],
name: pulumi.Input[str]):
"""
volumeDevice describes a mapping of a raw block device within a container.
:param pulumi.Input[str] device_path: devicePath is the path inside of the container that the device will be mapped to.
:param pulumi.Input[str] name: name must match the name of a persistentVolumeClaim in the pod
"""
pulumi.set(__self__, "device_path", device_path)
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="devicePath")
def device_path(self) -> pulumi.Input[str]:
"""
devicePath is the path inside of the container that the device will be mapped to.
"""
return pulumi.get(self, "device_path")
@device_path.setter
def device_path(self, value: pulumi.Input[str]):
pulumi.set(self, "device_path", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
name must match the name of a persistentVolumeClaim in the pod
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerContainerSpecVolumeMountsArgs:
def __init__(__self__, *,
mount_path: pulumi.Input[str],
name: pulumi.Input[str],
mount_propagation: Optional[pulumi.Input[str]] = None,
read_only: Optional[pulumi.Input[bool]] = None,
sub_path: Optional[pulumi.Input[str]] = None,
sub_path_expr: Optional[pulumi.Input[str]] = None):
"""
VolumeMount describes a mounting of a Volume within a container.
:param pulumi.Input[str] mount_path: Path within the container at which the volume should be mounted. Must not contain ':'.
:param pulumi.Input[str] name: This must match the Name of a Volume.
:param pulumi.Input[str] mount_propagation: mountPropagation determines how mounts are propagated from the host to the container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
:param pulumi.Input[bool] read_only: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
:param pulumi.Input[str] sub_path: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
:param pulumi.Input[str] sub_path_expr: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive.
"""
pulumi.set(__self__, "mount_path", mount_path)
pulumi.set(__self__, "name", name)
if mount_propagation is not None:
pulumi.set(__self__, "mount_propagation", mount_propagation)
if read_only is not None:
pulumi.set(__self__, "read_only", read_only)
if sub_path is not None:
pulumi.set(__self__, "sub_path", sub_path)
if sub_path_expr is not None:
pulumi.set(__self__, "sub_path_expr", sub_path_expr)
@property
@pulumi.getter(name="mountPath")
def mount_path(self) -> pulumi.Input[str]:
"""
Path within the container at which the volume should be mounted. Must not contain ':'.
"""
return pulumi.get(self, "mount_path")
@mount_path.setter
def mount_path(self, value: pulumi.Input[str]):
pulumi.set(self, "mount_path", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
This must match the Name of a Volume.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="mountPropagation")
def mount_propagation(self) -> Optional[pulumi.Input[str]]:
"""
mountPropagation determines how mounts are propagated from the host to the container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
"""
return pulumi.get(self, "mount_propagation")
@mount_propagation.setter
def mount_propagation(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mount_propagation", value)
@property
@pulumi.getter(name="readOnly")
def read_only(self) -> Optional[pulumi.Input[bool]]:
"""
Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
"""
return pulumi.get(self, "read_only")
@read_only.setter
def read_only(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "read_only", value)
@property
@pulumi.getter(name="subPath")
def sub_path(self) -> Optional[pulumi.Input[str]]:
"""
Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
"""
return pulumi.get(self, "sub_path")
@sub_path.setter
def sub_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sub_path", value)
@property
@pulumi.getter(name="subPathExpr")
def sub_path_expr(self) -> Optional[pulumi.Input[str]]:
"""
Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive.
"""
return pulumi.get(self, "sub_path_expr")
@sub_path_expr.setter
def sub_path_expr(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sub_path_expr", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsExplainerEndpointArgs:
def __init__(__self__, *,
grpc_port: Optional[pulumi.Input[int]] = None,
http_port: Optional[pulumi.Input[int]] = None,
service_host: Optional[pulumi.Input[str]] = None,
service_port: Optional[pulumi.Input[int]] = None,
type: Optional[pulumi.Input[str]] = None):
if grpc_port is not None:
pulumi.set(__self__, "grpc_port", grpc_port)
if http_port is not None:
pulumi.set(__self__, "http_port", http_port)
if service_host is not None:
pulumi.set(__self__, "service_host", service_host)
if service_port is not None:
pulumi.set(__self__, "service_port", service_port)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="grpcPort")
def grpc_port(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "grpc_port")
@grpc_port.setter
def grpc_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "grpc_port", value)
@property
@pulumi.getter(name="httpPort")
def http_port(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "http_port")
@http_port.setter
def http_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "http_port", value)
@property
@pulumi.getter
def service_host(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "service_host")
@service_host.setter
def service_host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_host", value)
@property
@pulumi.getter
def service_port(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "service_port")
@service_port.setter
def service_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "service_port", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsGraphArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
children: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenArgs']]]] = None,
endpoint: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsGraphEndpointArgs']] = None,
env_secret_ref_name: Optional[pulumi.Input[str]] = None,
implementation: Optional[pulumi.Input[str]] = None,
logger: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsGraphLoggerArgs']] = None,
methods: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
model_uri: Optional[pulumi.Input[str]] = None,
parameters: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsGraphParametersArgs']]]] = None,
service_account_name: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input['SeldonDeploymentSpecPredictorsGraphLoggerArgs'] logger: Request/response payload logging. v2alpha1 feature that is added to v1 for backwards compatibility while v1 is the storage version.
"""
pulumi.set(__self__, "name", name)
if children is not None:
pulumi.set(__self__, "children", children)
if endpoint is not None:
pulumi.set(__self__, "endpoint", endpoint)
if env_secret_ref_name is not None:
pulumi.set(__self__, "env_secret_ref_name", env_secret_ref_name)
if implementation is not None:
pulumi.set(__self__, "implementation", implementation)
if logger is not None:
pulumi.set(__self__, "logger", logger)
if methods is not None:
pulumi.set(__self__, "methods", methods)
if model_uri is not None:
pulumi.set(__self__, "model_uri", model_uri)
if parameters is not None:
pulumi.set(__self__, "parameters", parameters)
if service_account_name is not None:
pulumi.set(__self__, "service_account_name", service_account_name)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def children(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenArgs']]]]:
return pulumi.get(self, "children")
@children.setter
def children(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenArgs']]]]):
pulumi.set(self, "children", value)
@property
@pulumi.getter
def endpoint(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsGraphEndpointArgs']]:
return pulumi.get(self, "endpoint")
@endpoint.setter
def endpoint(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsGraphEndpointArgs']]):
pulumi.set(self, "endpoint", value)
@property
@pulumi.getter(name="envSecretRefName")
def env_secret_ref_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "env_secret_ref_name")
@env_secret_ref_name.setter
def env_secret_ref_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "env_secret_ref_name", value)
@property
@pulumi.getter
def implementation(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "implementation")
@implementation.setter
def implementation(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "implementation", value)
@property
@pulumi.getter
def logger(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsGraphLoggerArgs']]:
"""
Request/response payload logging. v2alpha1 feature that is added to v1 for backwards compatibility while v1 is the storage version.
"""
return pulumi.get(self, "logger")
@logger.setter
def logger(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsGraphLoggerArgs']]):
pulumi.set(self, "logger", value)
@property
@pulumi.getter
def methods(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "methods")
@methods.setter
def methods(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "methods", value)
@property
@pulumi.getter(name="modelUri")
def model_uri(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "model_uri")
@model_uri.setter
def model_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "model_uri", value)
@property
@pulumi.getter
def parameters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsGraphParametersArgs']]]]:
return pulumi.get(self, "parameters")
@parameters.setter
def parameters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsGraphParametersArgs']]]]):
pulumi.set(self, "parameters", value)
@property
@pulumi.getter(name="serviceAccountName")
def service_account_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "service_account_name")
@service_account_name.setter
def service_account_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_account_name", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsGraphChildrenArgs:
def __init__(__self__, *,
children: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenArgs']]]] = None,
endpoint: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenEndpointArgs']] = None,
env_secret_ref_name: Optional[pulumi.Input[str]] = None,
implementation: Optional[pulumi.Input[str]] = None,
logger: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenLoggerArgs']] = None,
methods: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
model_uri: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
parameters: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenParametersArgs']]]] = None,
service_account_name: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenLoggerArgs'] logger: Request/response payload logging. v2alpha1 feature that is added to v1 for backwards compatibility while v1 is the storage version.
"""
if children is not None:
pulumi.set(__self__, "children", children)
if endpoint is not None:
pulumi.set(__self__, "endpoint", endpoint)
if env_secret_ref_name is not None:
pulumi.set(__self__, "env_secret_ref_name", env_secret_ref_name)
if implementation is not None:
pulumi.set(__self__, "implementation", implementation)
if logger is not None:
pulumi.set(__self__, "logger", logger)
if methods is not None:
pulumi.set(__self__, "methods", methods)
if model_uri is not None:
pulumi.set(__self__, "model_uri", model_uri)
if name is not None:
pulumi.set(__self__, "name", name)
if parameters is not None:
pulumi.set(__self__, "parameters", parameters)
if service_account_name is not None:
pulumi.set(__self__, "service_account_name", service_account_name)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def children(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenArgs']]]]:
return pulumi.get(self, "children")
@children.setter
def children(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenArgs']]]]):
pulumi.set(self, "children", value)
@property
@pulumi.getter
def endpoint(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenEndpointArgs']]:
return pulumi.get(self, "endpoint")
@endpoint.setter
def endpoint(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenEndpointArgs']]):
pulumi.set(self, "endpoint", value)
@property
@pulumi.getter(name="envSecretRefName")
def env_secret_ref_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "env_secret_ref_name")
@env_secret_ref_name.setter
def env_secret_ref_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "env_secret_ref_name", value)
@property
@pulumi.getter
def implementation(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "implementation")
@implementation.setter
def implementation(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "implementation", value)
@property
@pulumi.getter
def logger(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenLoggerArgs']]:
"""
Request/response payload logging. v2alpha1 feature that is added to v1 for backwards compatibility while v1 is the storage version.
"""
return pulumi.get(self, "logger")
@logger.setter
def logger(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenLoggerArgs']]):
pulumi.set(self, "logger", value)
@property
@pulumi.getter
def methods(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "methods")
@methods.setter
def methods(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "methods", value)
@property
@pulumi.getter(name="modelUri")
def model_uri(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "model_uri")
@model_uri.setter
def model_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "model_uri", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def parameters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenParametersArgs']]]]:
return pulumi.get(self, "parameters")
@parameters.setter
def parameters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenParametersArgs']]]]):
pulumi.set(self, "parameters", value)
@property
@pulumi.getter(name="serviceAccountName")
def service_account_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "service_account_name")
@service_account_name.setter
def service_account_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_account_name", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsGraphChildrenChildrenArgs:
def __init__(__self__, *,
children: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenArgs']]]] = None,
endpoint: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenEndpointArgs']] = None,
env_secret_ref_name: Optional[pulumi.Input[str]] = None,
implementation: Optional[pulumi.Input[str]] = None,
logger: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenLoggerArgs']] = None,
methods: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
model_uri: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
parameters: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenParametersArgs']]]] = None,
service_account_name: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenLoggerArgs'] logger: Request/response payload logging. v2alpha1 feature that is added to v1 for backwards compatibility while v1 is the storage version.
"""
if children is not None:
pulumi.set(__self__, "children", children)
if endpoint is not None:
pulumi.set(__self__, "endpoint", endpoint)
if env_secret_ref_name is not None:
pulumi.set(__self__, "env_secret_ref_name", env_secret_ref_name)
if implementation is not None:
pulumi.set(__self__, "implementation", implementation)
if logger is not None:
pulumi.set(__self__, "logger", logger)
if methods is not None:
pulumi.set(__self__, "methods", methods)
if model_uri is not None:
pulumi.set(__self__, "model_uri", model_uri)
if name is not None:
pulumi.set(__self__, "name", name)
if parameters is not None:
pulumi.set(__self__, "parameters", parameters)
if service_account_name is not None:
pulumi.set(__self__, "service_account_name", service_account_name)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def children(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenArgs']]]]:
return pulumi.get(self, "children")
@children.setter
def children(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenArgs']]]]):
pulumi.set(self, "children", value)
@property
@pulumi.getter
def endpoint(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenEndpointArgs']]:
return pulumi.get(self, "endpoint")
@endpoint.setter
def endpoint(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenEndpointArgs']]):
pulumi.set(self, "endpoint", value)
@property
@pulumi.getter(name="envSecretRefName")
def env_secret_ref_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "env_secret_ref_name")
@env_secret_ref_name.setter
def env_secret_ref_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "env_secret_ref_name", value)
@property
@pulumi.getter
def implementation(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "implementation")
@implementation.setter
def implementation(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "implementation", value)
@property
@pulumi.getter
def logger(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenLoggerArgs']]:
"""
Request/response payload logging. v2alpha1 feature that is added to v1 for backwards compatibility while v1 is the storage version.
"""
return pulumi.get(self, "logger")
@logger.setter
def logger(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenLoggerArgs']]):
pulumi.set(self, "logger", value)
@property
@pulumi.getter
def methods(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "methods")
@methods.setter
def methods(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "methods", value)
@property
@pulumi.getter(name="modelUri")
def model_uri(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "model_uri")
@model_uri.setter
def model_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "model_uri", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def parameters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenParametersArgs']]]]:
return pulumi.get(self, "parameters")
@parameters.setter
def parameters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenParametersArgs']]]]):
pulumi.set(self, "parameters", value)
@property
@pulumi.getter(name="serviceAccountName")
def service_account_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "service_account_name")
@service_account_name.setter
def service_account_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_account_name", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenArgs:
def __init__(__self__, *,
children: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenChildrenArgs']]]] = None,
endpoint: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenEndpointArgs']] = None,
env_secret_ref_name: Optional[pulumi.Input[str]] = None,
implementation: Optional[pulumi.Input[str]] = None,
logger: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenLoggerArgs']] = None,
methods: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
model_uri: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
parameters: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenParametersArgs']]]] = None,
service_account_name: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenLoggerArgs'] logger: Request/response payload logging. v2alpha1 feature that is added to v1 for backwards compatibility while v1 is the storage version.
"""
if children is not None:
pulumi.set(__self__, "children", children)
if endpoint is not None:
pulumi.set(__self__, "endpoint", endpoint)
if env_secret_ref_name is not None:
pulumi.set(__self__, "env_secret_ref_name", env_secret_ref_name)
if implementation is not None:
pulumi.set(__self__, "implementation", implementation)
if logger is not None:
pulumi.set(__self__, "logger", logger)
if methods is not None:
pulumi.set(__self__, "methods", methods)
if model_uri is not None:
pulumi.set(__self__, "model_uri", model_uri)
if name is not None:
pulumi.set(__self__, "name", name)
if parameters is not None:
pulumi.set(__self__, "parameters", parameters)
if service_account_name is not None:
pulumi.set(__self__, "service_account_name", service_account_name)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def children(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenChildrenArgs']]]]:
return pulumi.get(self, "children")
@children.setter
def children(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenChildrenArgs']]]]):
pulumi.set(self, "children", value)
@property
@pulumi.getter
def endpoint(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenEndpointArgs']]:
return pulumi.get(self, "endpoint")
@endpoint.setter
def endpoint(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenEndpointArgs']]):
pulumi.set(self, "endpoint", value)
@property
@pulumi.getter(name="envSecretRefName")
def env_secret_ref_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "env_secret_ref_name")
@env_secret_ref_name.setter
def env_secret_ref_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "env_secret_ref_name", value)
@property
@pulumi.getter
def implementation(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "implementation")
@implementation.setter
def implementation(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "implementation", value)
@property
@pulumi.getter
def logger(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenLoggerArgs']]:
"""
Request/response payload logging. v2alpha1 feature that is added to v1 for backwards compatibility while v1 is the storage version.
"""
return pulumi.get(self, "logger")
@logger.setter
def logger(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenLoggerArgs']]):
pulumi.set(self, "logger", value)
@property
@pulumi.getter
def methods(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "methods")
@methods.setter
def methods(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "methods", value)
@property
@pulumi.getter(name="modelUri")
def model_uri(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "model_uri")
@model_uri.setter
def model_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "model_uri", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def parameters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenParametersArgs']]]]:
return pulumi.get(self, "parameters")
@parameters.setter
def parameters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenParametersArgs']]]]):
pulumi.set(self, "parameters", value)
@property
@pulumi.getter(name="serviceAccountName")
def service_account_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "service_account_name")
@service_account_name.setter
def service_account_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_account_name", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenChildrenArgs:
def __init__(__self__, *,
endpoint: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenChildrenEndpointArgs']] = None,
env_secret_ref_name: Optional[pulumi.Input[str]] = None,
implementation: Optional[pulumi.Input[str]] = None,
logger: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenChildrenLoggerArgs']] = None,
methods: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
model_uri: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
parameters: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenChildrenParametersArgs']]]] = None,
service_account_name: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenChildrenLoggerArgs'] logger: Request/response payload logging. v2alpha1 feature that is added to v1 for backwards compatibility while v1 is the storage version.
"""
if endpoint is not None:
pulumi.set(__self__, "endpoint", endpoint)
if env_secret_ref_name is not None:
pulumi.set(__self__, "env_secret_ref_name", env_secret_ref_name)
if implementation is not None:
pulumi.set(__self__, "implementation", implementation)
if logger is not None:
pulumi.set(__self__, "logger", logger)
if methods is not None:
pulumi.set(__self__, "methods", methods)
if model_uri is not None:
pulumi.set(__self__, "model_uri", model_uri)
if name is not None:
pulumi.set(__self__, "name", name)
if parameters is not None:
pulumi.set(__self__, "parameters", parameters)
if service_account_name is not None:
pulumi.set(__self__, "service_account_name", service_account_name)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def endpoint(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenChildrenEndpointArgs']]:
return pulumi.get(self, "endpoint")
@endpoint.setter
def endpoint(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenChildrenEndpointArgs']]):
pulumi.set(self, "endpoint", value)
@property
@pulumi.getter(name="envSecretRefName")
def env_secret_ref_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "env_secret_ref_name")
@env_secret_ref_name.setter
def env_secret_ref_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "env_secret_ref_name", value)
@property
@pulumi.getter
def implementation(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "implementation")
@implementation.setter
def implementation(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "implementation", value)
@property
@pulumi.getter
def logger(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenChildrenLoggerArgs']]:
"""
Request/response payload logging. v2alpha1 feature that is added to v1 for backwards compatibility while v1 is the storage version.
"""
return pulumi.get(self, "logger")
@logger.setter
def logger(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenChildrenLoggerArgs']]):
pulumi.set(self, "logger", value)
@property
@pulumi.getter
def methods(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "methods")
@methods.setter
def methods(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "methods", value)
@property
@pulumi.getter(name="modelUri")
def model_uri(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "model_uri")
@model_uri.setter
def model_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "model_uri", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def parameters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenChildrenParametersArgs']]]]:
return pulumi.get(self, "parameters")
@parameters.setter
def parameters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenChildrenParametersArgs']]]]):
pulumi.set(self, "parameters", value)
@property
@pulumi.getter(name="serviceAccountName")
def service_account_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "service_account_name")
@service_account_name.setter
def service_account_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_account_name", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenChildrenEndpointArgs:
def __init__(__self__, *,
service_host: Optional[pulumi.Input[str]] = None,
service_port: Optional[pulumi.Input[int]] = None,
type: Optional[pulumi.Input[str]] = None):
if service_host is not None:
pulumi.set(__self__, "service_host", service_host)
if service_port is not None:
pulumi.set(__self__, "service_port", service_port)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def service_host(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "service_host")
@service_host.setter
def service_host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_host", value)
@property
@pulumi.getter
def service_port(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "service_port")
@service_port.setter
def service_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "service_port", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenChildrenLoggerArgs:
def __init__(__self__, *,
mode: Optional[pulumi.Input[str]] = None,
url: Optional[pulumi.Input[str]] = None):
"""
Request/response payload logging. v2alpha1 feature that is added to v1 for backwards compatibility while v1 is the storage version.
:param pulumi.Input[str] mode: What payloads to log
:param pulumi.Input[str] url: URL to send request logging CloudEvents
"""
if mode is not None:
pulumi.set(__self__, "mode", mode)
if url is not None:
pulumi.set(__self__, "url", url)
@property
@pulumi.getter
def mode(self) -> Optional[pulumi.Input[str]]:
"""
What payloads to log
"""
return pulumi.get(self, "mode")
@mode.setter
def mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mode", value)
@property
@pulumi.getter
def url(self) -> Optional[pulumi.Input[str]]:
"""
URL to send request logging CloudEvents
"""
return pulumi.get(self, "url")
@url.setter
def url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "url", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenChildrenParametersArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None):
if name is not None:
pulumi.set(__self__, "name", name)
if type is not None:
pulumi.set(__self__, "type", type)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenEndpointArgs:
def __init__(__self__, *,
service_host: Optional[pulumi.Input[str]] = None,
service_port: Optional[pulumi.Input[int]] = None,
type: Optional[pulumi.Input[str]] = None):
if service_host is not None:
pulumi.set(__self__, "service_host", service_host)
if service_port is not None:
pulumi.set(__self__, "service_port", service_port)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def service_host(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "service_host")
@service_host.setter
def service_host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_host", value)
@property
@pulumi.getter
def service_port(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "service_port")
@service_port.setter
def service_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "service_port", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenLoggerArgs:
def __init__(__self__, *,
mode: Optional[pulumi.Input[str]] = None,
url: Optional[pulumi.Input[str]] = None):
"""
Request/response payload logging. v2alpha1 feature that is added to v1 for backwards compatibility while v1 is the storage version.
:param pulumi.Input[str] mode: What payloads to log
:param pulumi.Input[str] url: URL to send request logging CloudEvents
"""
if mode is not None:
pulumi.set(__self__, "mode", mode)
if url is not None:
pulumi.set(__self__, "url", url)
@property
@pulumi.getter
def mode(self) -> Optional[pulumi.Input[str]]:
"""
What payloads to log
"""
return pulumi.get(self, "mode")
@mode.setter
def mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mode", value)
@property
@pulumi.getter
def url(self) -> Optional[pulumi.Input[str]]:
"""
URL to send request logging CloudEvents
"""
return pulumi.get(self, "url")
@url.setter
def url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "url", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsGraphChildrenChildrenChildrenParametersArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None):
if name is not None:
pulumi.set(__self__, "name", name)
if type is not None:
pulumi.set(__self__, "type", type)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsGraphChildrenChildrenEndpointArgs:
def __init__(__self__, *,
service_host: Optional[pulumi.Input[str]] = None,
service_port: Optional[pulumi.Input[int]] = None,
type: Optional[pulumi.Input[str]] = None):
if service_host is not None:
pulumi.set(__self__, "service_host", service_host)
if service_port is not None:
pulumi.set(__self__, "service_port", service_port)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def service_host(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "service_host")
@service_host.setter
def service_host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_host", value)
@property
@pulumi.getter
def service_port(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "service_port")
@service_port.setter
def service_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "service_port", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsGraphChildrenChildrenLoggerArgs:
def __init__(__self__, *,
mode: Optional[pulumi.Input[str]] = None,
url: Optional[pulumi.Input[str]] = None):
"""
Request/response payload logging. v2alpha1 feature that is added to v1 for backwards compatibility while v1 is the storage version.
:param pulumi.Input[str] mode: What payloads to log
:param pulumi.Input[str] url: URL to send request logging CloudEvents
"""
if mode is not None:
pulumi.set(__self__, "mode", mode)
if url is not None:
pulumi.set(__self__, "url", url)
@property
@pulumi.getter
def mode(self) -> Optional[pulumi.Input[str]]:
"""
What payloads to log
"""
return pulumi.get(self, "mode")
@mode.setter
def mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mode", value)
@property
@pulumi.getter
def url(self) -> Optional[pulumi.Input[str]]:
"""
URL to send request logging CloudEvents
"""
return pulumi.get(self, "url")
@url.setter
def url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "url", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsGraphChildrenChildrenParametersArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None):
if name is not None:
pulumi.set(__self__, "name", name)
if type is not None:
pulumi.set(__self__, "type", type)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsGraphChildrenEndpointArgs:
def __init__(__self__, *,
service_host: Optional[pulumi.Input[str]] = None,
service_port: Optional[pulumi.Input[int]] = None,
type: Optional[pulumi.Input[str]] = None):
if service_host is not None:
pulumi.set(__self__, "service_host", service_host)
if service_port is not None:
pulumi.set(__self__, "service_port", service_port)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def service_host(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "service_host")
@service_host.setter
def service_host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_host", value)
@property
@pulumi.getter
def service_port(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "service_port")
@service_port.setter
def service_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "service_port", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsGraphChildrenLoggerArgs:
def __init__(__self__, *,
mode: Optional[pulumi.Input[str]] = None,
url: Optional[pulumi.Input[str]] = None):
"""
Request/response payload logging. v2alpha1 feature that is added to v1 for backwards compatibility while v1 is the storage version.
:param pulumi.Input[str] mode: What payloads to log
:param pulumi.Input[str] url: URL to send request logging CloudEvents
"""
if mode is not None:
pulumi.set(__self__, "mode", mode)
if url is not None:
pulumi.set(__self__, "url", url)
@property
@pulumi.getter
def mode(self) -> Optional[pulumi.Input[str]]:
"""
What payloads to log
"""
return pulumi.get(self, "mode")
@mode.setter
def mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mode", value)
@property
@pulumi.getter
def url(self) -> Optional[pulumi.Input[str]]:
"""
URL to send request logging CloudEvents
"""
return pulumi.get(self, "url")
@url.setter
def url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "url", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsGraphChildrenParametersArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None):
if name is not None:
pulumi.set(__self__, "name", name)
if type is not None:
pulumi.set(__self__, "type", type)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsGraphEndpointArgs:
def __init__(__self__, *,
grpc_port: Optional[pulumi.Input[int]] = None,
http_port: Optional[pulumi.Input[int]] = None,
service_host: Optional[pulumi.Input[str]] = None,
service_port: Optional[pulumi.Input[int]] = None,
type: Optional[pulumi.Input[str]] = None):
if grpc_port is not None:
pulumi.set(__self__, "grpc_port", grpc_port)
if http_port is not None:
pulumi.set(__self__, "http_port", http_port)
if service_host is not None:
pulumi.set(__self__, "service_host", service_host)
if service_port is not None:
pulumi.set(__self__, "service_port", service_port)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="grpcPort")
def grpc_port(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "grpc_port")
@grpc_port.setter
def grpc_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "grpc_port", value)
@property
@pulumi.getter(name="httpPort")
def http_port(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "http_port")
@http_port.setter
def http_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "http_port", value)
@property
@pulumi.getter
def service_host(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "service_host")
@service_host.setter
def service_host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_host", value)
@property
@pulumi.getter
def service_port(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "service_port")
@service_port.setter
def service_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "service_port", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsGraphLoggerArgs:
def __init__(__self__, *,
mode: Optional[pulumi.Input[str]] = None,
url: Optional[pulumi.Input[str]] = None):
"""
Request/response payload logging. v2alpha1 feature that is added to v1 for backwards compatibility while v1 is the storage version.
:param pulumi.Input[str] mode: What payloads to log
:param pulumi.Input[str] url: URL to send request logging CloudEvents
"""
if mode is not None:
pulumi.set(__self__, "mode", mode)
if url is not None:
pulumi.set(__self__, "url", url)
@property
@pulumi.getter
def mode(self) -> Optional[pulumi.Input[str]]:
"""
What payloads to log
"""
return pulumi.get(self, "mode")
@mode.setter
def mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mode", value)
@property
@pulumi.getter
def url(self) -> Optional[pulumi.Input[str]]:
"""
URL to send request logging CloudEvents
"""
return pulumi.get(self, "url")
@url.setter
def url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "url", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsGraphParametersArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
type: pulumi.Input[str],
value: pulumi.Input[str]):
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "type", type)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsSslArgs:
def __init__(__self__, *,
cert_secret_name: Optional[pulumi.Input[str]] = None):
if cert_secret_name is not None:
pulumi.set(__self__, "cert_secret_name", cert_secret_name)
@property
@pulumi.getter(name="certSecretName")
def cert_secret_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "cert_secret_name")
@cert_secret_name.setter
def cert_secret_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cert_secret_name", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsSvcOrchSpecArgs:
def __init__(__self__, *,
env: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecEnvArgs']]]] = None,
replicas: Optional[pulumi.Input[int]] = None,
resources: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecResourcesArgs']] = None):
"""
:param pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecResourcesArgs'] resources: ResourceRequirements describes the compute resource requirements.
"""
if env is not None:
pulumi.set(__self__, "env", env)
if replicas is not None:
pulumi.set(__self__, "replicas", replicas)
if resources is not None:
pulumi.set(__self__, "resources", resources)
@property
@pulumi.getter
def env(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecEnvArgs']]]]:
return pulumi.get(self, "env")
@env.setter
def env(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecEnvArgs']]]]):
pulumi.set(self, "env", value)
@property
@pulumi.getter
def replicas(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "replicas")
@replicas.setter
def replicas(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "replicas", value)
@property
@pulumi.getter
def resources(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecResourcesArgs']]:
"""
ResourceRequirements describes the compute resource requirements.
"""
return pulumi.get(self, "resources")
@resources.setter
def resources(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecResourcesArgs']]):
pulumi.set(self, "resources", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsSvcOrchSpecEnvArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: Optional[pulumi.Input[str]] = None,
value_from: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromArgs']] = None):
"""
EnvVar represents an environment variable present in a Container.
:param pulumi.Input[str] name: Name of the environment variable. Must be a C_IDENTIFIER.
:param pulumi.Input[str] value: Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".
:param pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromArgs'] value_from: Source for the environment variable's value. Cannot be used if value is not empty.
"""
pulumi.set(__self__, "name", name)
if value is not None:
pulumi.set(__self__, "value", value)
if value_from is not None:
pulumi.set(__self__, "value_from", value_from)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Name of the environment variable. Must be a C_IDENTIFIER.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
@property
@pulumi.getter(name="valueFrom")
def value_from(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromArgs']]:
"""
Source for the environment variable's value. Cannot be used if value is not empty.
"""
return pulumi.get(self, "value_from")
@value_from.setter
def value_from(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromArgs']]):
pulumi.set(self, "value_from", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromArgs:
def __init__(__self__, *,
config_map_key_ref: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromConfigMapKeyRefArgs']] = None,
field_ref: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromFieldRefArgs']] = None,
resource_field_ref: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromResourceFieldRefArgs']] = None,
secret_key_ref: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromSecretKeyRefArgs']] = None):
"""
Source for the environment variable's value. Cannot be used if value is not empty.
:param pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromConfigMapKeyRefArgs'] config_map_key_ref: Selects a key of a ConfigMap.
:param pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromFieldRefArgs'] field_ref: Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
:param pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromResourceFieldRefArgs'] resource_field_ref: Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
:param pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromSecretKeyRefArgs'] secret_key_ref: Selects a key of a secret in the pod's namespace.
"""
if config_map_key_ref is not None:
pulumi.set(__self__, "config_map_key_ref", config_map_key_ref)
if field_ref is not None:
pulumi.set(__self__, "field_ref", field_ref)
if resource_field_ref is not None:
pulumi.set(__self__, "resource_field_ref", resource_field_ref)
if secret_key_ref is not None:
pulumi.set(__self__, "secret_key_ref", secret_key_ref)
@property
@pulumi.getter(name="configMapKeyRef")
def config_map_key_ref(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromConfigMapKeyRefArgs']]:
"""
Selects a key of a ConfigMap.
"""
return pulumi.get(self, "config_map_key_ref")
@config_map_key_ref.setter
def config_map_key_ref(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromConfigMapKeyRefArgs']]):
pulumi.set(self, "config_map_key_ref", value)
@property
@pulumi.getter(name="fieldRef")
def field_ref(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromFieldRefArgs']]:
"""
Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
"""
return pulumi.get(self, "field_ref")
@field_ref.setter
def field_ref(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromFieldRefArgs']]):
pulumi.set(self, "field_ref", value)
@property
@pulumi.getter(name="resourceFieldRef")
def resource_field_ref(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromResourceFieldRefArgs']]:
"""
Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
"""
return pulumi.get(self, "resource_field_ref")
@resource_field_ref.setter
def resource_field_ref(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromResourceFieldRefArgs']]):
pulumi.set(self, "resource_field_ref", value)
@property
@pulumi.getter(name="secretKeyRef")
def secret_key_ref(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromSecretKeyRefArgs']]:
"""
Selects a key of a secret in the pod's namespace.
"""
return pulumi.get(self, "secret_key_ref")
@secret_key_ref.setter
def secret_key_ref(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromSecretKeyRefArgs']]):
pulumi.set(self, "secret_key_ref", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromConfigMapKeyRefArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
name: Optional[pulumi.Input[str]] = None,
optional: Optional[pulumi.Input[bool]] = None):
"""
Selects a key of a ConfigMap.
:param pulumi.Input[str] key: The key to select.
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
:param pulumi.Input[bool] optional: Specify whether the ConfigMap or its key must be defined
"""
pulumi.set(__self__, "key", key)
if name is not None:
pulumi.set(__self__, "name", name)
if optional is not None:
pulumi.set(__self__, "optional", optional)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The key to select.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def optional(self) -> Optional[pulumi.Input[bool]]:
"""
Specify whether the ConfigMap or its key must be defined
"""
return pulumi.get(self, "optional")
@optional.setter
def optional(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "optional", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromFieldRefArgs:
def __init__(__self__, *,
field_path: pulumi.Input[str],
api_version: Optional[pulumi.Input[str]] = None):
"""
Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
:param pulumi.Input[str] field_path: Path of the field to select in the specified API version.
:param pulumi.Input[str] api_version: Version of the schema the FieldPath is written in terms of, defaults to "v1".
"""
pulumi.set(__self__, "field_path", field_path)
if api_version is not None:
pulumi.set(__self__, "api_version", api_version)
@property
@pulumi.getter(name="fieldPath")
def field_path(self) -> pulumi.Input[str]:
"""
Path of the field to select in the specified API version.
"""
return pulumi.get(self, "field_path")
@field_path.setter
def field_path(self, value: pulumi.Input[str]):
pulumi.set(self, "field_path", value)
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> Optional[pulumi.Input[str]]:
"""
Version of the schema the FieldPath is written in terms of, defaults to "v1".
"""
return pulumi.get(self, "api_version")
@api_version.setter
def api_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_version", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromResourceFieldRefArgs:
def __init__(__self__, *,
resource: pulumi.Input[str],
container_name: Optional[pulumi.Input[str]] = None,
divisor: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromResourceFieldRefDivisorArgs']] = None):
"""
Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
:param pulumi.Input[str] resource: Required: resource to select
:param pulumi.Input[str] container_name: Container name: required for volumes, optional for env vars
:param pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromResourceFieldRefDivisorArgs'] divisor: Specifies the output format of the exposed resources, defaults to "1"
"""
pulumi.set(__self__, "resource", resource)
if container_name is not None:
pulumi.set(__self__, "container_name", container_name)
if divisor is not None:
pulumi.set(__self__, "divisor", divisor)
@property
@pulumi.getter
def resource(self) -> pulumi.Input[str]:
"""
Required: resource to select
"""
return pulumi.get(self, "resource")
@resource.setter
def resource(self, value: pulumi.Input[str]):
pulumi.set(self, "resource", value)
@property
@pulumi.getter(name="containerName")
def container_name(self) -> Optional[pulumi.Input[str]]:
"""
Container name: required for volumes, optional for env vars
"""
return pulumi.get(self, "container_name")
@container_name.setter
def container_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "container_name", value)
@property
@pulumi.getter
def divisor(self) -> Optional[pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromResourceFieldRefDivisorArgs']]:
"""
Specifies the output format of the exposed resources, defaults to "1"
"""
return pulumi.get(self, "divisor")
@divisor.setter
def divisor(self, value: Optional[pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromResourceFieldRefDivisorArgs']]):
pulumi.set(self, "divisor", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromResourceFieldRefDivisorArgs:
def __init__(__self__):
pass
@pulumi.input_type
class SeldonDeploymentSpecPredictorsSvcOrchSpecEnvValueFromSecretKeyRefArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
name: Optional[pulumi.Input[str]] = None,
optional: Optional[pulumi.Input[bool]] = None):
"""
Selects a key of a secret in the pod's namespace
:param pulumi.Input[str] key: The key of the secret to select from. Must be a valid secret key.
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
:param pulumi.Input[bool] optional: Specify whether the Secret or its key must be defined
"""
pulumi.set(__self__, "key", key)
if name is not None:
pulumi.set(__self__, "name", name)
if optional is not None:
pulumi.set(__self__, "optional", optional)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The key of the secret to select from. Must be a valid secret key.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def optional(self) -> Optional[pulumi.Input[bool]]:
"""
Specify whether the Secret or its key must be defined
"""
return pulumi.get(self, "optional")
@optional.setter
def optional(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "optional", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsSvcOrchSpecResourcesArgs:
def __init__(__self__, *,
limits: Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecResourcesLimitsArgs']]]] = None,
requests: Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecResourcesRequestsArgs']]]] = None):
"""
ResourceRequirements describes the compute resource requirements.
:param pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecResourcesLimitsArgs']]] limits: Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
:param pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecResourcesRequestsArgs']]] requests: Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
if limits is not None:
pulumi.set(__self__, "limits", limits)
if requests is not None:
pulumi.set(__self__, "requests", requests)
@property
@pulumi.getter
def limits(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecResourcesLimitsArgs']]]]:
"""
Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
return pulumi.get(self, "limits")
@limits.setter
def limits(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecResourcesLimitsArgs']]]]):
pulumi.set(self, "limits", value)
@property
@pulumi.getter
def requests(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecResourcesRequestsArgs']]]]:
"""
Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
return pulumi.get(self, "requests")
@requests.setter
def requests(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentSpecPredictorsSvcOrchSpecResourcesRequestsArgs']]]]):
pulumi.set(self, "requests", value)
@pulumi.input_type
class SeldonDeploymentSpecPredictorsSvcOrchSpecResourcesLimitsArgs:
def __init__(__self__):
pass
@pulumi.input_type
class SeldonDeploymentSpecPredictorsSvcOrchSpecResourcesRequestsArgs:
def __init__(__self__):
pass
@pulumi.input_type
class SeldonDeploymentStatusArgs:
def __init__(__self__, *,
address: Optional[pulumi.Input['SeldonDeploymentStatusAddressArgs']] = None,
deployment_status: Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentStatusDeploymentStatusArgs']]]] = None,
description: Optional[pulumi.Input[str]] = None,
replicas: Optional[pulumi.Input[int]] = None,
service_status: Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentStatusServiceStatusArgs']]]] = None,
state: Optional[pulumi.Input[str]] = None):
"""
SeldonDeploymentStatus defines the observed state of SeldonDeployment
:param pulumi.Input['SeldonDeploymentStatusAddressArgs'] address: Addressable placeholder until duckv1 issue is fixed: https://github.com/kubernetes-sigs/controller-tools/issues/391
"""
if address is not None:
pulumi.set(__self__, "address", address)
if deployment_status is not None:
pulumi.set(__self__, "deployment_status", deployment_status)
if description is not None:
pulumi.set(__self__, "description", description)
if replicas is not None:
pulumi.set(__self__, "replicas", replicas)
if service_status is not None:
pulumi.set(__self__, "service_status", service_status)
if state is not None:
pulumi.set(__self__, "state", state)
@property
@pulumi.getter
def address(self) -> Optional[pulumi.Input['SeldonDeploymentStatusAddressArgs']]:
"""
Addressable placeholder until duckv1 issue is fixed: https://github.com/kubernetes-sigs/controller-tools/issues/391
"""
return pulumi.get(self, "address")
@address.setter
def address(self, value: Optional[pulumi.Input['SeldonDeploymentStatusAddressArgs']]):
pulumi.set(self, "address", value)
@property
@pulumi.getter(name="deploymentStatus")
def deployment_status(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentStatusDeploymentStatusArgs']]]]:
return pulumi.get(self, "deployment_status")
@deployment_status.setter
def deployment_status(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentStatusDeploymentStatusArgs']]]]):
pulumi.set(self, "deployment_status", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def replicas(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "replicas")
@replicas.setter
def replicas(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "replicas", value)
@property
@pulumi.getter(name="serviceStatus")
def service_status(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentStatusServiceStatusArgs']]]]:
return pulumi.get(self, "service_status")
@service_status.setter
def service_status(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['SeldonDeploymentStatusServiceStatusArgs']]]]):
pulumi.set(self, "service_status", value)
@property
@pulumi.getter
def state(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "state")
@state.setter
def state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "state", value)
@pulumi.input_type
class SeldonDeploymentStatusAddressArgs:
def __init__(__self__, *,
url: Optional[pulumi.Input[str]] = None):
"""
Addressable placeholder until duckv1 issue is fixed: https://github.com/kubernetes-sigs/controller-tools/issues/391
"""
if url is not None:
pulumi.set(__self__, "url", url)
@property
@pulumi.getter
def url(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "url")
@url.setter
def url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "url", value)
@pulumi.input_type
class SeldonDeploymentStatusDeploymentStatusArgs:
def __init__(__self__, *,
available_replicas: Optional[pulumi.Input[int]] = None,
description: Optional[pulumi.Input[str]] = None,
explainer_for: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
replicas: Optional[pulumi.Input[int]] = None,
status: Optional[pulumi.Input[str]] = None):
if available_replicas is not None:
pulumi.set(__self__, "available_replicas", available_replicas)
if description is not None:
pulumi.set(__self__, "description", description)
if explainer_for is not None:
pulumi.set(__self__, "explainer_for", explainer_for)
if name is not None:
pulumi.set(__self__, "name", name)
if replicas is not None:
pulumi.set(__self__, "replicas", replicas)
if status is not None:
pulumi.set(__self__, "status", status)
@property
@pulumi.getter(name="availableReplicas")
def available_replicas(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "available_replicas")
@available_replicas.setter
def available_replicas(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "available_replicas", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="explainerFor")
def explainer_for(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "explainer_for")
@explainer_for.setter
def explainer_for(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "explainer_for", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def replicas(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "replicas")
@replicas.setter
def replicas(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "replicas", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
@pulumi.input_type
class SeldonDeploymentStatusServiceStatusArgs:
def __init__(__self__, *,
explainer_for: Optional[pulumi.Input[str]] = None,
grpc_endpoint: Optional[pulumi.Input[str]] = None,
http_endpoint: Optional[pulumi.Input[str]] = None,
svc_name: Optional[pulumi.Input[str]] = None):
if explainer_for is not None:
pulumi.set(__self__, "explainer_for", explainer_for)
if grpc_endpoint is not None:
pulumi.set(__self__, "grpc_endpoint", grpc_endpoint)
if http_endpoint is not None:
pulumi.set(__self__, "http_endpoint", http_endpoint)
if svc_name is not None:
pulumi.set(__self__, "svc_name", svc_name)
@property
@pulumi.getter(name="explainerFor")
def explainer_for(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "explainer_for")
@explainer_for.setter
def explainer_for(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "explainer_for", value)
@property
@pulumi.getter(name="grpcEndpoint")
def grpc_endpoint(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "grpc_endpoint")
@grpc_endpoint.setter
def grpc_endpoint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "grpc_endpoint", value)
@property
@pulumi.getter(name="httpEndpoint")
def http_endpoint(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "http_endpoint")
@http_endpoint.setter
def http_endpoint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "http_endpoint", value)
@property
@pulumi.getter(name="svcName")
def svc_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "svc_name")
@svc_name.setter
def svc_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "svc_name", value)
| 54.385038
| 1,007
| 0.717374
| 113,202
| 1,020,644
| 6.342635
| 0.014381
| 0.077291
| 0.066923
| 0.027733
| 0.806789
| 0.793905
| 0.773528
| 0.732423
| 0.72008
| 0.71371
| 0
| 0.001708
| 0.19334
| 1,020,644
| 18,766
| 1,008
| 54.387936
| 0.870377
| 0.356813
| 0
| 0.792362
| 1
| 0
| 0.232286
| 0.181959
| 0
| 0
| 0
| 0.005222
| 0
| 1
| 0.20824
| false
| 0.003785
| 0.00043
| 0.012816
| 0.326768
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
134ce58ab0c3dc0f50a687584d2003bd8b266de0
| 20,807
|
py
|
Python
|
macropodus/network/preprocess/preprocess_generator.py
|
leileixiao/Macropodus
|
9de38c06d332bd26e704fd4afd8f44678de7f44f
|
[
"MIT"
] | 485
|
2019-12-31T16:53:28.000Z
|
2022-03-31T08:01:30.000Z
|
macropodus/network/preprocess/preprocess_generator.py
|
xiankaigit/Macropodus
|
1d7b8f9938cb8b6d7744e9caabc3eb41c8891283
|
[
"MIT"
] | 14
|
2020-03-07T04:17:47.000Z
|
2022-03-14T01:08:23.000Z
|
macropodus/network/preprocess/preprocess_generator.py
|
xiankaigit/Macropodus
|
1d7b8f9938cb8b6d7744e9caabc3eb41c8891283
|
[
"MIT"
] | 85
|
2020-01-16T05:03:07.000Z
|
2022-03-03T11:42:07.000Z
|
# !/usr/bin/python
# -*- coding: utf-8 -*-
# @time : 2019/11/2 21:08
# @author : Mo
# @function: preprocess of network
from tensorflow.python.keras.utils import to_categorical
from macropodus.preprocess.tools_common import load_json
from macropodus.preprocess.tools_common import save_json
import numpy as np
import json
import os
class PreprocessGenerator:
"""
    Data preprocessing; input is one [label, ques] example per line (parsed as JSON).
"""
def __init__(self, path_model_l2i_i2l):
self.path_model_l2i_i2l = path_model_l2i_i2l
self.l2i_i2l = None
if os.path.exists(self.path_model_l2i_i2l):
self.l2i_i2l = load_json(self.path_model_l2i_i2l)
def prereocess_i2l(self, pred):
"""
        Convert class indices (idx) to class labels (label).
:param pred:
:return:
"""
if os.path.exists(self.path_model_l2i_i2l):
i2l = self.l2i_i2l['i2l']
res = []
for i in range(len(pred)):
res.append(i2l[str(pred[i])])
return res
else:
raise RuntimeError("path_fast_text_model_label2index is None")
def prereocess_l2i(self, pred):
"""
        Convert class labels (label) to class indices (idx).
:param pred:
:return:
"""
if os.path.exists(self.path_model_l2i_i2l):
l2i = self.l2i_i2l['l2i']
res = []
for i in range(len(pred)):
res.append(l2i[str(pred[i])])
return res
else:
raise RuntimeError("path_fast_text_model_label2index is None")
def prereocess_idx2label(self, pred):
"""
        Convert class indices (idx) to class labels (label).
:param pred:
:return:
"""
if os.path.exists(self.path_model_l2i_i2l):
pred_i2l = {}
i2l = self.l2i_i2l['i2l']
for i in range(len(pred)):
pred_i2l[i] = i2l[str(pred[i])]
pred_i2l_rank = sorted(pred_i2l.items(), key=lambda k: k[0], reverse=False)
pred_i2l_rank_type = [pir[1] for pir in pred_i2l_rank]
return pred_i2l_rank_type
else:
raise RuntimeError("path_fast_text_model_label2index is None")
def prereocess_label2idx(self, pred):
"""
        Convert class labels (label) to class indices (idx).
:param pred:
:return:
"""
if os.path.exists(self.path_model_l2i_i2l):
pred_l2i = {}
l2i = self.l2i_i2l['l2i']
for i in range(len(pred)):
pred_l2i[pred[i]] = l2i[pred[i]]
pred_l2i_rank = [sorted(pred_l2i.items(), key=lambda k: k[1], reverse=True)]
return pred_l2i_rank
else:
raise RuntimeError("path_fast_text_model_label2index is None")
def preprocess_label2set(self, path, embedding_type, encoding="utf-8"):
"""
        Count the labels and collect the set of concrete labels present.
:param path: str, like 'train.json'
:return:
"""
        # first collect the label set, i.e. the concrete classes present
if embedding_type in ['bert', 'albert', 'xlnet']:
label_sets = set(["<PAD>", "<CLS>", "<SEP>"])
else:
label_sets = set(["<PAD>"])
len_all = 0
file_csv = open(path, "r", encoding=encoding)
for line in file_csv:
len_all += 1
if line.strip():
ques_label = json.loads(line.strip())
label_org = ques_label["label"]
label_sets = label_sets | set(label_org)
file_csv.close()
return label_sets, len_all
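    # Expected input format, inferred from the parsing code in the methods
    # below (a minimal illustration; tokens and tags are made up): one JSON
    # object per line, e.g.
    #   {"question": ["天", "气", "好"], "label": ["B", "E", "S"]}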
def preprocess_label_question_to_idx_fit_generator_kfold(self, embedding_type, batch_size, path, embed,
rate=1, epochs=1, crf_mode='reg', encoding="utf-8",
kfold=5, flag="train"):
"""
        For fit_generator: convert sentences and labels to numeric indices.
:param embedding_type: str, like 'albert'
:param batch_size: int, like 64
:param path: str, like 'train.json'
:param embed: class, like embed
:param rate: float, like 0.9
:param crf_mode: str, like 'reg', 'pad'
:return: yield
"""
        # first collect the label set, i.e. the concrete classes present
label_set, len_all = self.preprocess_label2set(path, embedding_type, encoding=encoding)
        # build the label<->index dicts; if label2index already exists, reuse it (used for the dev set)
if not os.path.exists(self.path_model_l2i_i2l):
count = 0
label2index = {}
index2label = {}
for label_one in label_set:
label2index[label_one] = count
index2label[count] = label_one
count = count + 1
l2i_i2l = {}
l2i_i2l['l2i'] = label2index
l2i_i2l['i2l'] = index2label
save_json(l2i_i2l, self.path_model_l2i_i2l)
else:
l2i_i2l = load_json(self.path_model_l2i_i2l)
        # fraction of the data to read
len_ql = int(rate * len_all)
        if len_ql <= 500:  # skip subsampling for small corpora so there is enough data to train on
len_ql = len_all
def process_line(line, embed, l2i_i2l):
"""
            Process a single line: extract the label and question indices.
:param line:
:param embed:
:param l2i_i2l:
:return:
"""
            # pad both the question and the label for each example
ques_label = json.loads(line.strip())
label_org = ques_label["label"]
label_index = [l2i_i2l["l2i"][lr] for lr in label_org]
# len_sequence = len(label_index)
que_embed = embed.sentence2idx("".join(ques_label["question"]))
# label padding
if embedding_type in ['bert', 'albert']:
# padding label
len_leave = embed.len_max - len(label_index) - 2
if len_leave >= 0:
label_index_leave = [l2i_i2l["l2i"]["<CLS>"]] + [li for li in label_index] + [
l2i_i2l["l2i"]["<PAD>"] for _ in range(len_leave)] + [l2i_i2l["l2i"]["<SEP>"]]
else:
label_index_leave = [l2i_i2l["l2i"]["<CLS>"]] + label_index[0:embed.len_max - 2] + [
l2i_i2l["l2i"]["<SEP>"]]
else:
# padding label
len_leave = embed.len_max - len(label_index) # -2
if len_leave >= 0:
label_index_leave = [li for li in label_index] + [l2i_i2l["l2i"]["<PAD>"] for i in range(len_leave)]
else:
label_index_leave = label_index[0:embed.len_max]
            # convert to one-hot
label_res = to_categorical(label_index_leave, num_classes=len(l2i_i2l["l2i"]))
return que_embed, label_res
for i in range(epochs):
file_csv = open(path, "r", encoding=encoding)
cout_all_line = 0
cnt = 0
x, y = [], []
            # k-fold window for this epoch
            kfold_i = i % kfold
            kfold_epoch = int(len_ql / kfold)
            kfold_steps = [kfold_i * kfold_epoch, (kfold_i + 1) * kfold_epoch]
if flag=="train":
for line in file_csv:
if kfold_steps[0]<=cout_all_line<=kfold_steps[1]:
continue
                    # stop once enough lines have been read
if len_ql < cout_all_line:
break
cout_all_line += 1
if line.strip():
                        # process one JSON object at a time
                        # note: ideally preprocess beforehand so that ques length <= len_max (word2vec) or len_max-2 (bert, albert)
x_line, y_line = process_line(line, embed, l2i_i2l)
x.append(x_line)
y.append(y_line.tolist())
cnt += 1
                        # with fit_generator, yield once per batch_size
if cnt == batch_size:
                            # handle two dimensions: 1. embedding type (bert, word2vec, random); 2. CRF mode ('pad', 'reg')
if embedding_type in ['bert', 'albert']:
x_, y_ = np.array(x), np.array(y)
x_1 = np.array([x[0] for x in x_])
x_2 = np.array([x[1] for x in x_])
x_3 = np.array([x[2] for x in x_])
if crf_mode == 'pad':
x_all = [x_1, x_2, x_3]
elif crf_mode == 'reg':
x_all = [x_1, x_2]
else:
x_all = [x_1, x_2]
else:
x_, y_ = np.array(x), np.array(y)
x_1 = np.array([x[0] for x in x_])
x_2 = np.array([x[1] for x in x_])
if crf_mode == 'pad':
x_all = [x_1, x_2]
elif crf_mode == 'reg':
x_all = [x_1]
else:
x_all = [x_1]
cnt = 0
yield (x_all, y_)
x, y = [], []
file_csv.close()
else:
for line in file_csv:
if not kfold_steps[0] <= cout_all_line <= kfold_steps[1]:
continue
                    # stop once enough lines have been read
if len_ql < cout_all_line:
break
cout_all_line += 1
if line.strip():
                        # process one JSON object at a time
                        # note: ideally preprocess beforehand so that ques length <= len_max (word2vec) or len_max-2 (bert, albert)
x_line, y_line = process_line(line, embed, l2i_i2l)
x.append(x_line)
y.append(y_line.tolist())
cnt += 1
                        # with fit_generator, yield once per batch_size
if cnt == batch_size:
                            # handle two dimensions: 1. embedding type (bert, word2vec, random); 2. CRF mode ('pad', 'reg')
if embedding_type in ['bert', 'albert']:
x_, y_ = np.array(x), np.array(y)
x_1 = np.array([x[0] for x in x_])
x_2 = np.array([x[1] for x in x_])
x_3 = np.array([x[2] for x in x_])
if crf_mode == 'pad':
x_all = [x_1, x_2, x_3]
elif crf_mode == 'reg':
x_all = [x_1, x_2]
else:
x_all = [x_1, x_2]
else:
x_, y_ = np.array(x), np.array(y)
x_1 = np.array([x[0] for x in x_])
x_2 = np.array([x[1] for x in x_])
if crf_mode == 'pad':
x_all = [x_1, x_2]
elif crf_mode == 'reg':
x_all = [x_1]
else:
x_all = [x_1]
cnt = 0
yield (x_all, y_)
x, y = [], []
file_csv.close()
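    # Reading of the k-fold logic above: with flag="train" the window
    # kfold_steps[0] <= line_no <= kfold_steps[1] is skipped (held out for
    # validation); with any other flag only that window is yielded, so train
    # and dev generators built from the same epoch index see disjoint slices.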
def preprocess_label_question_to_idx_fit_generator(self, embedding_type, batch_size, path, embed, rate=1, crf_mode='reg', epochs=1, encoding="utf-8"):
"""
        For fit_generator: convert sentences and labels to numeric indices.
:param embedding_type: str, like 'albert'
:param batch_size: int, like 64
:param path: str, like 'train.json'
:param embed: class, like embed
:param rate: float, like 0.9
:param crf_mode: str, like 'reg', 'pad'
:return: yield
"""
        # first collect the label set, i.e. the concrete classes present
label_set, len_all = self.preprocess_label2set(path, embedding_type, encoding=encoding)
        # build the label<->index dicts; if label2index already exists, reuse it (used for the dev set)
if not os.path.exists(self.path_model_l2i_i2l):
count = 0
label2index = {}
index2label = {}
for label_one in label_set:
label2index[label_one] = count
index2label[count] = label_one
count = count + 1
l2i_i2l = {}
l2i_i2l['l2i'] = label2index
l2i_i2l['i2l'] = index2label
save_json(l2i_i2l, self.path_model_l2i_i2l)
else:
l2i_i2l = load_json(self.path_model_l2i_i2l)
        # fraction of the data to read
len_ql = int(rate * len_all)
        if len_ql <= 500:  # skip subsampling for small corpora so there is enough data to train on
len_ql = len_all
def process_line(line, embed, l2i_i2l):
"""
            Process a single line: extract the label and question indices.
:param line:
:param embed:
:param l2i_i2l:
:return:
"""
            # pad both the question and the label for each example
ques_label = json.loads(line.strip())
label_org = ques_label["label"]
label_index = [l2i_i2l["l2i"][lr] for lr in label_org]
# len_sequence = len(label_index)
que_embed = embed.sentence2idx("".join(ques_label["question"]))
# label padding
if embedding_type in ['bert', 'albert']:
# padding label
len_leave = embed.len_max - len(label_index) - 2
if len_leave >= 0:
label_index_leave = [l2i_i2l["l2i"]["<CLS>"]] + [li for li in label_index] + [
l2i_i2l["l2i"]["<PAD>"] for _ in range(len_leave)] + [l2i_i2l["l2i"]["<SEP>"]]
else:
label_index_leave = [l2i_i2l["l2i"]["<CLS>"]] + label_index[0:embed.len_max - 2] + [
l2i_i2l["l2i"]["<SEP>"]]
else:
# padding label
len_leave = embed.len_max - len(label_index) # -2
if len_leave >= 0:
label_index_leave = [li for li in label_index] + [l2i_i2l["l2i"]["<PAD>"] for i in range(len_leave)]
else:
label_index_leave = label_index[0:embed.len_max]
            # convert to one-hot
label_res = to_categorical(label_index_leave, num_classes=len(l2i_i2l["l2i"]))
return que_embed, label_res
for i in range(epochs):
file_csv = open(path, "r", encoding=encoding)
cout_all_line = 0
cnt = 0
x, y = [], []
for line in file_csv:
                # stop once enough lines have been read
if len_ql < cout_all_line:
break
cout_all_line += 1
if line.strip():
                    # process one JSON object at a time
                    # note: ideally preprocess beforehand so that ques length <= len_max (word2vec) or len_max-2 (bert, albert)
x_line, y_line = process_line(line, embed, l2i_i2l)
x.append(x_line)
y.append(y_line.tolist())
cnt += 1
                    # with fit_generator, yield once per batch_size
if cnt == batch_size:
                        # handle two dimensions: 1. embedding type (bert, word2vec, random); 2. CRF mode ('pad', 'reg')
if embedding_type in ['bert', 'albert']:
x_, y_ = np.array(x), np.array(y)
x_1 = np.array([x[0] for x in x_])
x_2 = np.array([x[1] for x in x_])
x_3 = np.array([x[2] for x in x_])
if crf_mode == 'pad':
x_all = [x_1, x_2, x_3]
elif crf_mode == 'reg':
x_all = [x_1, x_2]
else:
x_all = [x_1, x_2]
else:
x_, y_ = np.array(x), np.array(y)
x_1 = np.array([x[0] for x in x_])
x_2 = np.array([x[1] for x in x_])
if crf_mode == 'pad':
x_all = [x_1, x_2]
elif crf_mode == 'reg':
x_all = [x_1]
else:
x_all = [x_1]
cnt = 0
yield (x_all, y_)
x, y = [], []
file_csv.close()
def preprocess_label_question_to_idx_fit(self, embedding_type, path, embed, rate=1, crf_mode='reg', encoding="utf-8"):
"""
        For fit: process every line to get label and question indices, returned as arrays.
:param embedding_type: str, like 'albert'
:param path: str, like 'train.json'
:param embed: class, like embed
:param rate: float, like 0.9
:param crf_mode: str, like 'reg', 'pad'
:return: np.array
"""
        # first collect the label set, i.e. the concrete classes present
label_set, len_all = self.preprocess_label2set(path, embedding_type, encoding=encoding)
        # build the label<->index dicts; if label2index already exists, reuse it (used for the dev set)
if not os.path.exists(self.path_model_l2i_i2l):
count = 0
label2index = {}
index2label = {}
for label_one in label_set:
label2index[label_one] = count
index2label[count] = label_one
count = count + 1
l2i_i2l = {}
l2i_i2l['l2i'] = label2index
l2i_i2l['i2l'] = index2label
save_json(l2i_i2l, self.path_model_l2i_i2l)
else:
l2i_i2l = load_json(self.path_model_l2i_i2l)
        # fraction of the data to read
len_ql = int(rate * len_all)
        if len_ql <= 500:  # skip subsampling for small corpora so there is enough data to train on
len_ql = len_all
def process_line(line, embed, l2i_i2l):
"""
            Process a single line: extract the label and question indices.
:param line:
:param embed:
:param l2i_i2l:
:return:
"""
            # pad both the question and the label for each example
ques_label = json.loads(line.strip())
label_org = ques_label["label"]
label_index = [l2i_i2l["l2i"][lr] for lr in label_org]
# len_sequence = len(label_index)
que_embed = embed.sentence2idx("".join(ques_label["question"]))
# label padding
if embedding_type in ['bert', 'albert']:
# padding label
len_leave = embed.len_max - len(label_index) - 2
if len_leave >= 0:
label_index_leave = [l2i_i2l["l2i"]["<CLS>"]] + [li for li in label_index] + [
l2i_i2l["l2i"]["<PAD>"] for _ in range(len_leave)] + [l2i_i2l["l2i"]["<SEP>"]]
else:
label_index_leave = [l2i_i2l["l2i"]["<CLS>"]] + label_index[0:embed.len_max - 2] + [
l2i_i2l["l2i"]["<SEP>"]]
else:
# padding label
len_leave = embed.len_max - len(label_index) # -2
if len_leave >= 0:
label_index_leave = [li for li in label_index] + [l2i_i2l["l2i"]["<PAD>"] for i in range(len_leave)]
else:
label_index_leave = label_index[0:embed.len_max]
            # convert to one-hot
label_res = to_categorical(label_index_leave, num_classes=len(l2i_i2l["l2i"]))
return que_embed, label_res
file_csv = open(path, "r", encoding=encoding)
cout_all_line = 0
cnt = 0
x, y = [], []
for line in file_csv:
            # stop once enough lines have been read
if len_ql < cout_all_line:
break
cout_all_line += 1
if line.strip():
                # process one JSON object at a time
                # note: ideally preprocess beforehand so that ques length <= len_max (word2vec) or len_max-2 (bert, albert)
x_line, y_line = process_line(line, embed, l2i_i2l)
x.append(x_line)
y.append(y_line.tolist())
cnt += 1
        # handle two dimensions: 1. embedding type (bert, word2vec, random); 2. CRF mode ('pad', 'reg')
if embedding_type in ['bert', 'albert']:
x_, y_ = np.array(x), np.array(y)
x_1 = np.array([x[0] for x in x_])
x_2 = np.array([x[1] for x in x_])
x_3 = np.array([x[2] for x in x_])
if crf_mode == 'pad':
x_all = [x_1, x_2, x_3]
elif crf_mode == 'reg':
x_all = [x_1, x_2]
else:
x_all = [x_1, x_2]
else:
x_, y_ = np.array(x), np.array(y)
x_1 = np.array([x[0] for x in x_])
x_2 = np.array([x[1] for x in x_])
if crf_mode == 'pad':
x_all = [x_1, x_2]
elif crf_mode == 'reg':
x_all = x_1
else:
x_all = x_1
        # with fit, return the full arrays
return x_all, y_
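# A minimal usage sketch (assumptions: `some_embed` is any embedding object
# exposing `sentence2idx()` and `len_max`, as used by process_line above; the
# file paths are illustrative):
#
# pg = PreprocessGenerator("model_l2i_i2l.json")
# x_all, y = pg.preprocess_label_question_to_idx_fit(
#     embedding_type="albert", path="train.json", embed=some_embed,
#     rate=1.0, crf_mode="reg")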
| 40.798039
| 154
| 0.467391
| 2,401
| 20,807
| 3.789254
| 0.083299
| 0.048142
| 0.032645
| 0.015828
| 0.898219
| 0.889426
| 0.865465
| 0.840185
| 0.834469
| 0.811827
| 0
| 0.036374
| 0.425242
| 20,807
| 509
| 155
| 40.878193
| 0.724392
| 0.139232
| 0
| 0.867435
| 0
| 0
| 0.034479
| 0.007455
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034582
| false
| 0
| 0.017291
| 0
| 0.080692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1362f376c1ed142bd59ef003d5d176ea60b0c099
| 744
|
py
|
Python
|
src/spacexpython/capsules.py
|
fossabot/SpacePY-X
|
2bdcb055bc4e4efed9df88b0ee778253d8adc0c7
|
[
"MIT"
] | null | null | null |
src/spacexpython/capsules.py
|
fossabot/SpacePY-X
|
2bdcb055bc4e4efed9df88b0ee778253d8adc0c7
|
[
"MIT"
] | 4
|
2019-08-02T13:29:45.000Z
|
2019-08-15T13:16:36.000Z
|
src/spacexpython/capsules.py
|
alshapton/SpaceX-Python
|
4d000e4d08cc073446d00bf26e9e22fca082dc48
|
[
"MIT"
] | null | null | null |
from . import urldata
from . import utils
def capsules(parameters='',timeOut=1):
requestUrl = urldata.Domain.main + urldata.Domain.main_capsules
return utils.makeRequest(requestUrl,timeOut,parameters)
def upcoming(parameters='',timeOut=1):
requestUrl = urldata.Domain.main + urldata.Domain.upcoming_capsules
return utils.makeRequest(requestUrl,timeOut,parameters)
def past(parameters='',timeOut=1):
requestUrl = urldata.Domain.main + urldata.Domain.past_capsules
return utils.makeRequest(requestUrl,timeOut,parameters)
def one(capsule_id,parameters='',timeOut=1):
requestUrl = urldata.Domain.main + urldata.Domain.main_capsules + "/" + str(capsule_id)
return utils.makeRequest(requestUrl,timeOut,parameters)
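# A minimal usage sketch, assuming the package is importable as
# `spacexpython` and that the endpoints resolve as configured in urldata;
# the capsule id is illustrative only:
#
# from spacexpython import capsules
# print(capsules.capsules())        # all capsules
# print(capsules.upcoming())        # upcoming capsules
# print(capsules.one("C101"))       # a single capsule by id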
| 39.157895
| 91
| 0.771505
| 87
| 744
| 6.528736
| 0.229885
| 0.183099
| 0.179577
| 0.197183
| 0.839789
| 0.839789
| 0.753521
| 0.753521
| 0.450704
| 0.246479
| 0
| 0.006061
| 0.112903
| 744
| 18
| 92
| 41.333333
| 0.854545
| 0
| 0
| 0.285714
| 0
| 0
| 0.001344
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.142857
| 0
| 0.714286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
136f9de1217ec1d1cda839e85d79ce85a513e527
| 122,440
|
py
|
Python
|
draco/tests/test_soft.py
|
Zehua-Zeng/draco2
|
cef1e7abb792282266dbffb6a5653e257f9ba732
|
[
"MIT"
] | null | null | null |
draco/tests/test_soft.py
|
Zehua-Zeng/draco2
|
cef1e7abb792282266dbffb6a5653e257f9ba732
|
[
"MIT"
] | null | null | null |
draco/tests/test_soft.py
|
Zehua-Zeng/draco2
|
cef1e7abb792282266dbffb6a5653e257f9ba732
|
[
"MIT"
] | null | null | null |
from draco.asp_utils import Block
from draco.programs import define, helpers, soft
from draco.run import run_clingo
def list_preferences(program: str):
try:
model = next(run_clingo(helpers.program + define.program + program, 1))
return sorted(
[
tuple(map(lambda x: x.name, symbol.arguments))
for symbol in model.answer_set
if symbol.name == "preference"
]
)
except StopIteration:
return None
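# list_preferences runs clingo over helpers + define + the given program and
# returns the sorted (preference_name, entity, ...) tuples extracted from
# `preference` facts in the first answer set, or None when the program is
# unsatisfiable (as test_list_preferences below checks).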
def test_list_preferences():
assert list_preferences(":- a. :- not a.") is None
def test_aggregate():
b = soft.blocks["aggregate"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
attribute((mark,type),m1,text).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,text).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
attribute((encoding,aggregate),e1,mean).
"""
)
== [("aggregate", "e1")]
)
assert (
list_preferences(
b.program
+ """
attribute((encoding,aggregate),e1,mean).
attribute((encoding,aggregate),e2,mean).
"""
)
== [("aggregate", "e1"), ("aggregate", "e2")]
)
def test_bin():
b = soft.blocks["bin"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
attribute((mark,type),m1,text).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,text).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
attribute((encoding,binning),e1,10).
"""
)
== [("bin", "e1")]
)
assert (
list_preferences(
b.program
+ """
attribute((encoding,binning),e1,10).
attribute((encoding,binning),e2,20).
"""
)
== [("bin", "e1"), ("bin", "e2")]
)
def test_bin_high():
b = soft.blocks["bin_high"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
attribute((encoding,binning),e1,8).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
attribute((encoding,binning),e1,13).
"""
)
== [("bin_high", "e1")]
)
assert (
list_preferences(
b.program
+ """
attribute((encoding,binning),e1,14).
attribute((encoding,binning),e2,20).
"""
)
== [("bin_high", "e1"), ("bin_high", "e2")]
)
def test_bin_low():
b = soft.blocks["bin_low"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
attribute((encoding,binning),e1,8).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
attribute((encoding,binning),e1,3).
"""
)
== [("bin_low", "e1")]
)
assert (
list_preferences(
b.program
+ """
attribute((encoding,binning),e1,7).
attribute((encoding,binning),e2,3).
"""
)
== [("bin_low", "e1"), ("bin_low", "e2")]
)
def test_encoding():
b = soft.blocks["encoding"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
attribute((mark,type),m1,text).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
attribute((mark,type),m1,text).
entity(encoding,m1,e1).
"""
)
== [("encoding", "e1")]
)
assert (
list_preferences(
b.program
+ """
attribute((mark,type),m1,text).
entity(encoding,m1,e1).
entity(encoding,m1,e2).
"""
)
== [("encoding", "e1"), ("encoding", "e2")]
)
def test_encoding_field():
b = soft.blocks["encoding_field"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
attribute((encoding,aggregate),e1,mean).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
attribute((encoding,field),e1,temperature).
"""
)
== [("encoding_field", "e1")]
)
assert (
list_preferences(
b.program
+ """
attribute((encoding,field),e1,temperature).
attribute((encoding,field),e2,date).
"""
)
== [("encoding_field", "e1"), ("encoding_field", "e2")]
)
def test_same_field():
b = soft.blocks["same_field"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
entity(field,root,temperature).
entity(field,root,date).
entity(mark,v,m).
entity(encoding,m,e1).
entity(encoding,m,e2).
attribute((encoding,field),e1,temperature).
attribute((encoding,field),e2,date).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
entity(field,root,temperature).
entity(mark,v,m).
entity(encoding,m,e1).
entity(encoding,m,e2).
attribute((encoding,field),e1,temperature).
attribute((encoding,field),e2,temperature).
"""
)
== [("same_field", "temperature")]
)
assert (
list_preferences(
b.program
+ """
entity(field,root,temperature).
entity(field,root,date).
entity(mark,v,m).
entity(encoding,m,e1).
entity(encoding,m,e2).
entity(encoding,m,e3).
entity(encoding,m,e4).
attribute((encoding,field),e1,temperature).
attribute((encoding,field),e2,temperature).
attribute((encoding,field),e3,date).
attribute((encoding,field),e4,date).
"""
)
== [("same_field", "date"), ("same_field", "temperature")]
)
# use field temperature twice with mark m1, and field date with 2 different marks
assert (
list_preferences(
b.program
+ """
entity(field,root,temperature).
entity(field,root,date).
entity(mark,v,m1).
entity(mark,v,m2).
entity(encoding,m1,e1).
entity(encoding,m1,e2).
entity(encoding,m2,e3).
entity(encoding,m1,e4).
attribute((encoding,field),e1,temperature).
attribute((encoding,field),e2,temperature).
attribute((encoding,field),e3,date).
attribute((encoding,field),e4,date).
"""
)
== [("same_field", "temperature")]
)
def test_same_field_grt3():
b = soft.blocks["same_field_grt3"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
entity(field,root,temperature).
entity(field,root,date).
entity(mark,v,m).
entity(encoding,m,e1).
entity(encoding,m,e2).
attribute((encoding,field),e1,temperature).
attribute((encoding,field),e2,date).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
entity(field,root,temperature).
entity(mark,v,m).
entity(encoding,m,e1).
entity(encoding,m,e2).
attribute((encoding,field),e1,temperature).
attribute((encoding,field),e2,temperature).
"""
)
== []
)
# use field temperature 3 times, but with 2 different marks.
assert (
list_preferences(
b.program
+ """
entity(field,root,temperature).
entity(mark,v,m1).
entity(mark,v,m2).
entity(encoding,m1,e1).
entity(encoding,m1,e2).
entity(encoding,m2,e3).
attribute((encoding,field),e1,temperature).
attribute((encoding,field),e2,temperature).
attribute((encoding,field),e3,temperature).
"""
)
== []
)
# use field temperature 3 times with the same mark
assert (
list_preferences(
b.program
+ """
entity(field,root,temperature).
entity(mark,v,m).
entity(encoding,m,e1).
entity(encoding,m,e2).
entity(encoding,m,e3).
attribute((encoding,field),e1,temperature).
attribute((encoding,field),e2,temperature).
attribute((encoding,field),e3,temperature).
"""
)
== [("same_field_grt3", "temperature")]
)
# use field temperature 4 times with the same mark
assert (
list_preferences(
b.program
+ """
entity(field,root,temperature).
entity(mark,v,m).
entity(encoding,m,e1).
entity(encoding,m,e2).
entity(encoding,m,e3).
entity(encoding,m,e4).
attribute((encoding,field),e1,temperature).
attribute((encoding,field),e2,temperature).
attribute((encoding,field),e3,temperature).
attribute((encoding,field),e4,temperature).
"""
)
== [("same_field_grt3", "temperature")]
)
def test_count_grt1():
b = soft.blocks["count_grt1"]
assert isinstance(b, Block)
# only 1 count
assert (
list_preferences(
b.program
+ """
entity(mark,v,m).
entity(encoding,m,e1).
attribute((encoding,aggregate),e1,count).
"""
)
== []
)
    # 2 aggregates, but only one of them is count
assert (
list_preferences(
b.program
+ """
entity(mark,v,m).
entity(encoding,m,e1).
entity(encoding,m,e2).
attribute((encoding,aggregate),e1,sum).
attribute((encoding,aggregate),e2,count).
"""
)
== []
)
# 2 counts
assert (
list_preferences(
b.program
+ """
entity(mark,v,m).
entity(encoding,m,e1).
entity(encoding,m,e2).
attribute((encoding,aggregate),e1,count).
attribute((encoding,aggregate),e2,count).
"""
)
== [("count_grt1", "m")]
)
# 3 counts
assert (
list_preferences(
b.program
+ """
entity(mark,v,m).
entity(encoding,m,e1).
entity(encoding,m,e2).
entity(encoding,m,e3).
attribute((encoding,aggregate),e1,count).
attribute((encoding,aggregate),e2,count).
attribute((encoding,aggregate),e3,count).
"""
)
== [("count_grt1", "m")]
)
def test_number_categorical():
b = soft.blocks["number_categorical"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
attribute((field,type),temperature,number).
entity(mark,v,m).
entity(encoding,m,e1).
attribute((encoding,field),e1,temperature).
attribute((encoding,channel),e1,x).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,linear).
"""
)
== []
)
# root scale, categorical for number
assert (
list_preferences(
b.program
+ """
attribute((field,type),temperature,number).
entity(view,root,v).
entity(mark,v,m).
entity(encoding,m,e1).
attribute((encoding,field),e1,temperature).
attribute((encoding,channel),e1,x).
entity(scale,root,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,categorical).
"""
)
== [("number_categorical", "e1")]
)
# two scales, categorical for number
assert (
list_preferences(
b.program
+ """
attribute((field,type),temperature,number).
entity(mark,v1,m).
entity(encoding,m,e1).
attribute((encoding,field),e1,temperature).
attribute((encoding,channel),e1,x).
entity(scale,v1,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,categorical).
"""
)
== [("number_categorical", "e1")]
)
    # number field used twice (in the same mark)
assert (
list_preferences(
b.program
+ """
attribute((field,type),temperature,number).
entity(mark,v1,m1).
entity(encoding,m1,e1).
attribute((encoding,field),e1,temperature).
attribute((encoding,channel),e1,x).
entity(encoding,m1,e2).
attribute((encoding,field),e2,temperature).
attribute((encoding,channel),e2,color).
entity(scale,v1,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,linear).
entity(scale,v1,s2).
attribute((scale,channel),s2,color).
attribute((scale,type),s2,categorical).
"""
)
== [("number_categorical", "e2")]
)
def test_bin_low_unique():
b = soft.blocks["bin_low_unique"]
assert isinstance(b, Block)
# number
assert (
list_preferences(
b.program
+ """
attribute((field,type),temperature,number).
attribute((field,unique),temperature,50).
attribute((encoding,field),e1,temperature).
attribute((encoding,binning),e1,20).
"""
)
== []
)
# datetime
assert (
list_preferences(
b.program
+ """
attribute((field,type),date,datetime).
attribute((field,unique),date,10).
attribute((encoding,field),e1,date).
attribute((encoding,binning),e1,5).
"""
)
== [("bin_low_unique", "e1")]
)
def test_bin_not_linear():
b = soft.blocks["bin_not_linear"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
entity(mark,v,m).
entity(encoding,m,e1).
attribute((encoding,channel),e1,x).
attribute((encoding,binning),e1,15).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,linear).
"""
)
== []
)
# scale on root
assert (
list_preferences(
b.program
+ """
entity(view,root,v).
entity(mark,v,m).
entity(encoding,m,e1).
attribute((encoding,channel),e1,x).
attribute((encoding,binning),e1,15).
entity(scale,root,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,linear).
"""
)
== []
)
# log scale
assert (
list_preferences(
b.program
+ """
entity(mark,v,m).
entity(encoding,m,e1).
attribute((encoding,channel),e1,x).
attribute((encoding,binning),e1,15).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,log).
"""
)
== [("bin_not_linear", "e1")]
)
def test_only_discrete():
b = soft.blocks["only_discrete"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,linear).
"""
)
== []
)
# 1 encoding
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,color).
entity(scale,v,s1).
attribute((scale,channel),s1,color).
attribute((scale,type),s1,categorical).
"""
)
== [("only_discrete", "m1")]
)
# 2 encodings
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,y).
attribute((encoding,binning),e2,10).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,ordinal).
"""
)
== [("only_discrete", "m1")]
)
# shared scale
assert (
list_preferences(
b.program
+ """
entity(view,root,v1).
entity(mark,v1,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,y).
attribute((encoding,binning),e2,10).
entity(view,root,v2).
entity(mark,v2,m2).
entity(encoding,m2,e3).
attribute((encoding,channel),e3,color).
entity(scale,root,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,ordinal).
entity(scale,v2,s2).
attribute((scale,channel),s2,color).
attribute((scale,type),s2,categorical).
"""
)
== [("only_discrete", "m1"), ("only_discrete", "m2")]
)
def test_multi_non_pos():
b = soft.blocks["multi_non_pos"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,color).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,color).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,size).
"""
)
== [("multi_non_pos", "m1")]
)
def test_non_pos_used_before_pos():
b = soft.blocks["non_pos_used_before_pos"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,y).
"""
)
== []
)
# both x and y are not used yet
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,color).
"""
)
== [("non_pos_used_before_pos", "m1")]
)
# x is not used yet
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,color).
"""
)
== [("non_pos_used_before_pos", "m1")]
)
def test_aggregate_group_by_raw():
b = soft.blocks["aggregate_group_by_raw"]
assert isinstance(b, Block)
# discrete: bin
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,aggregate),e1,count).
entity(encoding,m1,e2).
attribute((encoding,binning),e2,10).
"""
)
== []
)
# discrete scale
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,aggregate),e1,count).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,x).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,ordinal).
"""
)
== []
)
    # aggregate, not raw continuous
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,aggregate),e1,count).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,x).
attribute((encoding,aggregate),e2,max).
"""
)
== []
)
    # raw continuous
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,aggregate),e1,count).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,x).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,linear).
"""
)
== [("aggregate_group_by_raw", "e2")]
)
def test_aggregate_no_discrete():
b = soft.blocks["aggregate_no_discrete"]
assert isinstance(b, Block)
# discrete: bin
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,aggregate),e1,count).
entity(encoding,m1,e2).
attribute((encoding,binning),e2,10).
"""
)
== []
)
# discrete scale
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,aggregate),e1,count).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,x).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,ordinal).
"""
)
== []
)
    # aggregate continuous
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,aggregate),e1,count).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,x).
attribute((encoding,aggregate),e2,max).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,linear).
"""
)
== [("aggregate_no_discrete", "m1")]
)
    # raw continuous
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,aggregate),e1,count).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,x).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,linear).
"""
)
== [("aggregate_no_discrete", "m1")]
)
def test_x_y_raw():
b = soft.blocks["x_y_raw"]
assert isinstance(b, Block)
# x discrete, color raw continuous
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,color).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,ordinal).
entity(scale,v,s2).
attribute((scale,channel),s2,color).
attribute((scale,type),s2,linear).
"""
)
== []
)
# x, y discrete, but size is aggregate
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,y).
entity(encoding,m1,e3).
attribute((encoding,channel),e3,size).
attribute((encoding,aggregate),e3,mean).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,ordinal).
entity(scale,v,s2).
attribute((scale,channel),s2,y).
attribute((scale,type),s2,ordinal).
entity(scale,v,s3).
attribute((scale,channel),s3,size).
attribute((scale,type),s3,linear).
"""
)
== []
)
# x discrete, y continuous, color raw continuous
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,y).
entity(encoding,m1,e3).
attribute((encoding,channel),e3,color).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,ordinal).
entity(scale,v,s2).
attribute((scale,channel),s2,y).
attribute((scale,type),s2,linear).
entity(scale,v,s3).
attribute((scale,channel),s3,color).
attribute((scale,type),s3,linear).
"""
)
== []
)
# x, y discrete, color raw continuous
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,y).
entity(encoding,m1,e3).
attribute((encoding,channel),e3,color).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,ordinal).
entity(scale,v,s2).
attribute((scale,channel),s2,y).
attribute((scale,type),s2,ordinal).
entity(scale,v,s3).
attribute((scale,channel),s3,color).
attribute((scale,type),s3,linear).
"""
)
== [("x_y_raw", "m1")]
)
def test_continuous_not_zero():
b = soft.blocks["continuous_not_zero"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,linear).
attribute((scale,zero),s1,true).
"""
)
== []
)
    # binned, so zero is not required
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
            attribute((encoding,binning),e1,10).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,ordinal).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,linear).
"""
)
== [("continuous_not_zero", "e1")]
)
def test_size_not_zero():
b = soft.blocks["size_not_zero"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,size).
entity(scale,v,s1).
attribute((scale,channel),s1,size).
attribute((scale,type),s1,linear).
attribute((scale,zero),s1,true).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,size).
entity(scale,v,s1).
attribute((scale,channel),s1,size).
attribute((scale,type),s1,linear).
"""
)
== [("size_not_zero", "e1")]
)
    # even when binned, size requires zero
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,size).
            attribute((encoding,binning),e1,10).
entity(scale,v,s1).
attribute((scale,channel),s1,size).
attribute((scale,type),s1,ordinal).
"""
)
== [("size_not_zero", "e1")]
)
def test_continuous_pos_not_zero():
b = soft.blocks["continuous_pos_not_zero"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,linear).
attribute((scale,zero),s1,true).
"""
)
== []
)
    # binned, so zero is not required
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
            attribute((encoding,binning),e1,10).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,ordinal).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,linear).
"""
)
== [("continuous_pos_not_zero", "e1")]
)
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,y).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,linear).
entity(scale,v,s2).
attribute((scale,channel),s2,y).
attribute((scale,type),s2,linear).
"""
)
== [("continuous_pos_not_zero", "e1"), ("continuous_pos_not_zero", "e2")]
)
def test_skew_zero():
b = soft.blocks["skew_zero"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
attribute((field,min),temperature,5).
attribute((field,max),temperature,20).
entity(mark,v,m).
entity(encoding,m,e1).
attribute((encoding,field),e,temperature).
attribute((encoding,channel),e1,y).
entity(scale,v,s).
attribute((scale,channel),s,y).
attribute((scale,zero),s,true).
"""
)
== []
)
# both max and min are positive
assert (
list_preferences(
b.program
+ """
attribute((field,min),temperature,800).
attribute((field,max),temperature,1000).
entity(mark,v,m).
entity(encoding,m,e).
attribute((encoding,field),e,temperature).
attribute((encoding,channel),e,y).
entity(scale,v,s).
attribute((scale,channel),s,y).
attribute((scale,zero),s,true).
"""
)
== [("skew_zero", "e")]
)
# both max and min are negative
assert (
list_preferences(
b.program
+ """
attribute((field,min),temperature,-700).
attribute((field,max),temperature,-500).
entity(mark,v,m).
entity(encoding,m,e).
attribute((encoding,field),e,temperature).
attribute((encoding,channel),e,y).
entity(scale,v,s).
attribute((scale,channel),s,y).
attribute((scale,zero),s,true).
"""
)
== [("skew_zero", "e")]
)
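# cross_zero appears to penalize a zero baseline only when the field
# range actually crosses zero (min < 0 < max), as in [-200, 1000] below.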
def test_cross_zero():
b = soft.blocks["cross_zero"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
attribute((field,min),temperature,0).
attribute((field,max),temperature,200).
entity(mark,v,m).
entity(encoding,m,e1).
attribute((encoding,field),e1,temperature).
attribute((encoding,channel),e1,y).
entity(scale,v,s).
attribute((scale,channel),s,y).
attribute((scale,zero),s,true).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
attribute((field,min),temperature,-200).
attribute((field,max),temperature,1000).
entity(mark,v,m).
entity(encoding,m,e).
attribute((encoding,field),e,temperature).
attribute((encoding,channel),e,y).
entity(scale,v,s).
attribute((scale,channel),s,y).
attribute((scale,zero),s,true).
"""
)
== [("cross_zero", "e")]
)
def test_only_y():
b = soft.blocks["only_y"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
"""
)
== []
)
# one mark with only y and size channels (no x)
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,size).
"""
)
== [("only_y", "m1")]
)
# 2 marks
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
entity(mark,v,m2).
entity(encoding,m2,e2).
attribute((encoding,channel),e2,x).
"""
)
== [("only_y", "m1")]
)
def test_binned_orientation_not_x():
b = soft.blocks["binned_orientation_not_x"]
assert isinstance(b, Block)
# number
assert (
list_preferences(
b.program
+ """
attribute((field,type),temperature,number).
attribute((encoding,field),e1,temperature).
attribute((encoding,binning),e1,20).
attribute((encoding,channel),e1,x).
"""
)
== []
)
# datetime
assert (
list_preferences(
b.program
+ """
attribute((field,type),date,datetime).
attribute((encoding,field),e1,date).
attribute((encoding,binning),e1,20).
attribute((encoding,channel),e1,y).
"""
)
== [("binned_orientation_not_x", "e1")]
)
def test_high_cardinality_ordinal():
b = soft.blocks["high_cardinality_ordinal"]
assert isinstance(b, Block)
# high-cardinality field, but binning reduces the effective cardinality
assert (
list_preferences(
b.program
+ """
attribute((field,unique),date,1461).
entity(mark,v,m).
entity(encoding,m,e1).
attribute((encoding,field),e1,date).
attribute((encoding,channel),e1,x).
attribute((encoding,binning),e1,20).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,ordinal).
"""
)
== []
)
# high cardinality: 1461 unique values, no binning
assert (
list_preferences(
b.program
+ """
attribute((field,unique),date,1461).
entity(mark,v,m).
entity(encoding,m,e1).
attribute((encoding,field),e1,date).
attribute((encoding,channel),e1,x).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,ordinal).
"""
)
== [("high_cardinality_ordinal", "e1")]
)
def test_high_cardinality_categorical_grt10():
b = soft.blocks["high_cardinality_categorical_grt10"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
attribute((field,unique),weather,5).
entity(mark,v,m).
entity(encoding,m,e1).
attribute((encoding,field),e1,weather).
attribute((encoding,channel),e1,color).
entity(scale,v,s1).
attribute((scale,channel),s1,color).
attribute((scale,type),s1,categorical).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
attribute((field,unique),weather,15).
entity(mark,v,m).
entity(encoding,m,e1).
attribute((encoding,field),e1,weather).
attribute((encoding,channel),e1,color).
entity(scale,v,s1).
attribute((scale,channel),s1,color).
attribute((scale,type),s1,categorical).
"""
)
== [("high_cardinality_categorical_grt10", "e1")]
)
def test_high_cardinality_shape():
b = soft.blocks["high_cardinality_shape"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
attribute((field,unique),date,25).
attribute((encoding,field),e1,date).
attribute((encoding,channel),e1,shape).
attribute((encoding,binning),e1,5).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
attribute((field,unique),weather,15).
attribute((encoding,field),e1,weather).
attribute((encoding,channel),e1,shape).
"""
)
== [("high_cardinality_shape", "e1")]
)
def test_high_cardinality_size():
b = soft.blocks["high_cardinality_size"]
assert isinstance(b, Block)
# x is not continuous, binning
assert (
list_preferences(
b.program
+ """
attribute((field,unique),date,1461).
entity(mark,v,m).
entity(encoding,m,e1).
attribute((encoding,field),e1,date).
attribute((encoding,binning),e1,20).
attribute((encoding,channel),e1,x).
entity(encoding,m,e2).
attribute((encoding,channel),e2,size).
"""
)
== []
)
# x is not continuous, ordinal scale
assert (
list_preferences(
b.program
+ """
attribute((field,unique),weather,150).
entity(mark,v,m).
entity(encoding,m,e1).
attribute((encoding,field),e1,weather).
attribute((encoding,channel),e1,x).
entity(encoding,m,e2).
attribute((encoding,channel),e2,size).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,ordinal).
"""
)
== []
)
# y is continuous with high cardinality
assert (
list_preferences(
b.program
+ """
attribute((field,unique),temperature,150).
entity(mark,v,m).
entity(encoding,m,e1).
attribute((encoding,field),e1,temperature).
attribute((encoding,channel),e1,y).
entity(encoding,m,e2).
attribute((encoding,channel),e2,size).
entity(scale,v,s1).
attribute((scale,channel),s1,y).
attribute((scale,type),s1,linear).
"""
)
== [("high_cardinality_size", "e1")]
)
def test_horizontal_scrolling_x():
b = soft.blocks["horizontal_scrolling_x"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
attribute((field,unique),date,1461).
entity(mark,v,m).
entity(encoding,m,e1).
attribute((encoding,field),e1,date).
attribute((encoding,channel),e1,x).
attribute((encoding,binning),e1,20).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
attribute((field,unique),date,1461).
entity(mark,v,m).
entity(encoding,m,e1).
attribute((encoding,field),e1,date).
attribute((encoding,channel),e1,x).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,linear).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
attribute((field,unique),date,1461).
entity(mark,v,m).
entity(encoding,m,e1).
attribute((encoding,field),e1,date).
attribute((encoding,channel),e1,x).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,ordinal).
"""
)
== [("horizontal_scrolling_x", "e1")]
)
def test_horizontal_scrolling_col():
b = soft.blocks["horizontal_scrolling_col"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
attribute((field,unique),date,1461).
entity(facet,v,f).
attribute((facet,field),f,date).
attribute((facet,channel),f,col).
attribute((facet,binning),f,5).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
attribute((field,unique),date,1461).
entity(facet,v,f).
attribute((facet,field),f,date).
attribute((facet,channel),f,col).
"""
)
== [("horizontal_scrolling_col", "f")]
)
def test_date_scale():
b = soft.blocks["date_scale"]
assert isinstance(b, Block)
# scale on view, linear
assert (
list_preferences(
b.program
+ """
attribute((field,type),date,datetime).
entity(mark,v,m).
entity(encoding,m,e1).
attribute((encoding,field),e1,date).
attribute((encoding,channel),e1,x).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,linear).
"""
)
== []
)
# scale on root, ordinal
assert (
list_preferences(
b.program
+ """
attribute((field,type),date,datetime).
entity(view,root,v).
entity(mark,v,m).
entity(encoding,m,e1).
attribute((encoding,field),e1,date).
attribute((encoding,channel),e1,x).
entity(scale,root,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,ordinal).
"""
)
== []
)
# scale on view, log
assert (
list_preferences(
b.program
+ """
attribute((field,type),date,datetime).
entity(mark,v,m).
entity(encoding,m,e1).
attribute((encoding,field),e1,date).
attribute((encoding,channel),e1,x).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,log).
"""
)
== [("date_scale", "e1")]
)
# scale on root, categorical
assert (
list_preferences(
b.program
+ """
attribute((field,type),date,datetime).
entity(view,root,v).
entity(mark,v,m).
entity(encoding,m,e1).
attribute((encoding,field),e1,date).
attribute((encoding,channel),e1,color).
entity(scale,root,s1).
attribute((scale,channel),s1,color).
attribute((scale,type),s1,categorical).
"""
)
== [("date_scale", "e1")]
)
def test_number_linear():
b = soft.blocks["number_linear"]
assert isinstance(b, Block)
# scale on view, linear
assert (
list_preferences(
b.program
+ """
attribute((field,type),temperature,number).
attribute((field,unique),temperature,111).
entity(mark,v,m).
entity(encoding,m,e1).
attribute((encoding,field),e1,temperature).
attribute((encoding,channel),e1,x).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,linear).
"""
)
== []
)
# scale on root, with binning, ordinal
assert (
list_preferences(
b.program
+ """
attribute((field,type),temperature,number).
attribute((field,unique),temperature,111).
entity(view,root,v).
entity(mark,v,m).
entity(encoding,m,e1).
attribute((encoding,field),e1,temperature).
attribute((encoding,channel),e1,x).
attribute((encoding,binning),e1,20).
entity(scale,root,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,ordinal).
"""
)
== []
)
# scale on view, ordinal
assert (
list_preferences(
b.program
+ """
attribute((field,type),temperature,number).
attribute((field,unique),temperature,111).
entity(mark,v,m).
entity(encoding,m,e1).
attribute((encoding,field),e1,temperature).
attribute((encoding,channel),e1,x).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,ordinal).
"""
)
== [("number_linear", "e1")]
)
# scale on root, log
assert (
list_preferences(
b.program
+ """
attribute((field,type),temperature,number).
attribute((field,unique),temperature,111).
entity(view,root,v).
entity(mark,v,m).
entity(encoding,m,e1).
attribute((encoding,field),e1,temperature).
attribute((encoding,channel),e1,x).
entity(scale,root,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,log).
"""
)
== [("number_linear", "e1")]
)
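# Task-dependent preferences: value_agg penalizes aggregation under a
# "value" task; summary_facet (next test) penalizes faceting under a
# "summary" task. Both are reported per offending view.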
def test_value_agg():
b = soft.blocks["value_agg"]
assert isinstance(b, Block)
# value task, no agg
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,binning),e1,20).
"""
)
== []
)
# summary task, v has agg
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,aggregate),e1,max).
"""
)
== []
)
# value task, v1 has agg
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(view,root,v1).
entity(mark,v1,m1).
entity(encoding,m1,e1).
attribute((encoding,aggregate),e1,max).
entity(view,root,v2).
entity(mark,v2,m2).
entity(encoding,m2,e2).
attribute((encoding,binning),e2,20).
"""
)
== [("value_agg", "v1")]
)
def test_summary_facet():
b = soft.blocks["summary_facet"]
assert isinstance(b, Block)
# summary task, no facet
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,binning),e1,20).
"""
)
== []
)
# value task, v has facet
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(view,root,v).
entity(facet,v,f).
"""
)
== []
)
# summary task, v has facet
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(view,root,v1).
entity(facet,v1,f).
entity(view,root,v2).
entity(mark,v2,m1).
entity(encoding,m1,e1).
attribute((encoding,binning),e1,20).
"""
)
== [("summary_facet", "v1")]
)
def test_c_d_col():
b = soft.blocks["c_d_col"]
assert isinstance(b, Block)
# continuous x, discrete y (binning), row
assert (
list_preferences(
b.program
+ """
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,y).
attribute((encoding,binning),e2,10).
entity(facet,v,f).
attribute((facet,channel),f,row).
"""
)
== []
)
# continuous y, discrete x (binning), column
assert (
list_preferences(
b.program
+ """
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
attribute((encoding,binning),e1,10).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,y).
entity(facet,v,f).
attribute((facet,channel),f,col).
"""
)
== []
)
# continuous x, discrete y (ordinal), column
assert (
list_preferences(
b.program
+ """
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,y).
entity(scale,v,s1).
attribute((scale,channel),s1,y).
attribute((scale,type),s1,ordinal).
entity(facet,v,f).
attribute((facet,channel),f,col).
"""
)
== [("c_d_col", "v")]
)
def test_date_not_x():
b = soft.blocks["date_not_x"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
attribute((field,type),date,datetime).
entity(encoding,m1,e1).
attribute((encoding,field),e1,date).
attribute((encoding,channel),e1,x).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
attribute((field,type),date,datetime).
entity(encoding,m1,e1).
attribute((encoding,field),e1,date).
attribute((encoding,channel),e1,y).
"""
)
== [("date_not_x", "e1")]
)
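# The next four tests cover positional/facet channel pairings: x_row,
# y_row, x_col, and y_col each fire only when the positional encoding and
# the facet channel appear in the same view.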
def test_x_row():
b = soft.blocks["x_row"]
assert isinstance(b, Block)
# x and row not in the same view
assert (
list_preferences(
b.program
+ """
entity(mark,v1,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(facet,v2,f1).
attribute((facet,channel),f1,row).
"""
)
== []
)
# x and col in the same view
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(facet,v,f1).
attribute((facet,channel),f1,col).
"""
)
== []
)
# x and row in the same view
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(facet,v,f1).
attribute((facet,channel),f1,row).
"""
)
== [("x_row", "v")]
)
def test_y_row():
b = soft.blocks["y_row"]
assert isinstance(b, Block)
# y and row not in the same view
assert (
list_preferences(
b.program
+ """
entity(mark,v1,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
entity(facet,v2,f1).
attribute((facet,channel),f1,row).
"""
)
== []
)
# y and col in the same view
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
entity(facet,v,f1).
attribute((facet,channel),f1,col).
"""
)
== []
)
# y and row in the same view
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
entity(facet,v,f1).
attribute((facet,channel),f1,row).
"""
)
== [("y_row", "v")]
)
def test_x_col():
b = soft.blocks["x_col"]
assert isinstance(b, Block)
# x and col not in the same view
assert (
list_preferences(
b.program
+ """
entity(mark,v1,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(facet,v2,f1).
attribute((facet,channel),f1,col).
"""
)
== []
)
# x and row in the same view
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(facet,v,f1).
attribute((facet,channel),f1,row).
"""
)
== []
)
# x and col in the same view
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(facet,v,f1).
attribute((facet,channel),f1,col).
"""
)
== [("x_col", "v")]
)
def test_y_col():
b = soft.blocks["y_col"]
assert isinstance(b, Block)
# y and col not in the same view
assert (
list_preferences(
b.program
+ """
entity(mark,v1,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
entity(facet,v2,f1).
attribute((facet,channel),f1,col).
"""
)
== []
)
# y and row in the same view
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
entity(facet,v,f1).
attribute((facet,channel),f1,row).
"""
)
== []
)
# y and col in the same view
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
entity(facet,v,f1).
attribute((facet,channel),f1,col).
"""
)
== [("y_col", "v")]
)
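# Entropy preferences: judging by these fixtures, field entropy is
# compared against a threshold between 5000 and 6000; *_entropy_high
# fires above it and *_entropy_low below it, for color and size alike.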
def test_color_entropy_high():
b = soft.blocks["color_entropy_high"]
assert isinstance(b, Block)
# scale on view
assert (
list_preferences(
b.program
+ """
attribute((field,entropy),precipitation,200).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,field),e1,precipitation).
attribute((encoding,channel),e1,color).
entity(scale,v,s1).
attribute((scale,channel),s1,color).
attribute((scale,type),s1,linear).
"""
)
== []
)
# scale on root
assert (
list_preferences(
b.program
+ """
attribute((field,entropy),precipitation,6000).
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,field),e1,precipitation).
attribute((encoding,channel),e1,color).
entity(scale,root,s1).
attribute((scale,channel),s1,color).
attribute((scale,type),s1,linear).
"""
)
== [("color_entropy_high", "e1")]
)
def test_color_entropy_low():
b = soft.blocks["color_entropy_low"]
assert isinstance(b, Block)
# scale on view
assert (
list_preferences(
b.program
+ """
attribute((field,entropy),precipitation,6000).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,field),e1,precipitation).
attribute((encoding,channel),e1,color).
entity(scale,v,s1).
attribute((scale,channel),s1,color).
attribute((scale,type),s1,linear).
"""
)
== []
)
# scale on root
assert (
list_preferences(
b.program
+ """
attribute((field,entropy),precipitation,5000).
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,field),e1,precipitation).
attribute((encoding,channel),e1,color).
entity(scale,root,s1).
attribute((scale,channel),s1,color).
attribute((scale,type),s1,linear).
"""
)
== [("color_entropy_low", "e1")]
)
def test_size_entropy_high():
b = soft.blocks["size_entropy_high"]
assert isinstance(b, Block)
# scale on view
assert (
list_preferences(
b.program
+ """
attribute((field,entropy),precipitation,3000).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,field),e1,precipitation).
attribute((encoding,channel),e1,size).
entity(scale,v,s1).
attribute((scale,channel),s1,size).
attribute((scale,type),s1,linear).
"""
)
== []
)
# scale on root
assert (
list_preferences(
b.program
+ """
attribute((field,entropy),precipitation,6000).
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,field),e1,precipitation).
attribute((encoding,channel),e1,size).
entity(scale,root,s1).
attribute((scale,channel),s1,size).
attribute((scale,type),s1,linear).
"""
)
== [("size_entropy_high", "e1")]
)
def test_size_entropy_low():
b = soft.blocks["size_entropy_low"]
assert isinstance(b, Block)
# scale on view
assert (
list_preferences(
b.program
+ """
attribute((field,entropy),precipitation,6000).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,field),e1,precipitation).
attribute((encoding,channel),e1,size).
entity(scale,v,s1).
attribute((scale,channel),s1,size).
attribute((scale,type),s1,linear).
"""
)
== []
)
# scale on root
assert (
list_preferences(
b.program
+ """
attribute((field,entropy),precipitation,4000).
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,field),e1,precipitation).
attribute((encoding,channel),e1,size).
entity(scale,root,s1).
attribute((scale,channel),s1,size).
attribute((scale,type),s1,linear).
"""
)
== [("size_entropy_low", "e1")]
)
def test_linear_scale():
b = soft.blocks["linear_scale"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,color).
entity(scale,v,s1).
attribute((scale,channel),s1,color).
attribute((scale,type),s1,categorical).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,linear).
"""
)
== [("linear_scale", "e1")]
)
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,y).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,linear).
entity(scale,v,s2).
attribute((scale,channel),s2,y).
attribute((scale,type),s2,linear).
"""
)
== [("linear_scale", "e1"), ("linear_scale", "e2")]
)
def test_log_scale():
b = soft.blocks["log_scale"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,color).
entity(scale,v,s1).
attribute((scale,channel),s1,color).
attribute((scale,type),s1,categorical).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,log).
"""
)
== [("log_scale", "e1")]
)
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,y).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,log).
entity(scale,v,s2).
attribute((scale,channel),s2,y).
attribute((scale,type),s2,log).
"""
)
== [("log_scale", "e1"), ("log_scale", "e2")]
)
def test_ordinal_scale():
b = soft.blocks["ordinal_scale"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,color).
entity(scale,v,s1).
attribute((scale,channel),s1,color).
attribute((scale,type),s1,categorical).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,ordinal).
"""
)
== [("ordinal_scale", "e1")]
)
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,y).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,ordinal).
entity(scale,v,s2).
attribute((scale,channel),s2,y).
attribute((scale,type),s2,ordinal).
"""
)
== [("ordinal_scale", "e1"), ("ordinal_scale", "e2")]
)
def test_categorical_scale():
b = soft.blocks["categorical_scale"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,linear).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,color).
entity(scale,v,s1).
attribute((scale,channel),s1,color).
attribute((scale,type),s1,categorical).
"""
)
== [("categorical_scale", "e1")]
)
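# Mark/encoding-type preferences: the c_*/d_* prefixes stand for
# continuous and discrete positional encodings, so e.g. c_d_point fires
# for a point mark with one continuous and one discrete position. An
# encoding counts as discrete here when it is binned or its scale is
# ordinal/categorical, and as continuous under a linear or log scale.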
def test_c_c_point():
b = soft.blocks["c_c_point"]
assert isinstance(b, Block)
# only x encoding, continuous x
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
attribute((mark,type),m1,point).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,linear).
"""
)
== []
)
# continuous x, discrete y
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
attribute((mark,type),m1,point).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,y).
attribute((encoding,binning),e2,10).
"""
)
== []
)
# continuous x, y
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
attribute((mark,type),m1,point).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,y).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,linear).
entity(scale,v,s2).
attribute((scale,channel),s2,y).
attribute((scale,type),s2,log).
"""
)
== [("c_c_point", "m1")]
)
def test_c_c_line():
b = soft.blocks["c_c_line"]
assert isinstance(b, Block)
# continuous x, y, point mark
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
attribute((mark,type),m1,point).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,y).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,linear).
entity(scale,v,s2).
attribute((scale,channel),s2,y).
attribute((scale,type),s2,linear).
"""
)
== []
)
# continuous x, y
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
attribute((mark,type),m1,line).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,y).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,linear).
entity(scale,v,s2).
attribute((scale,channel),s2,y).
attribute((scale,type),s2,log).
"""
)
== [("c_c_line", "m1")]
)
def test_c_c_area():
b = soft.blocks["c_c_area"]
assert isinstance(b, Block)
# continuous y, discrete x
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
attribute((mark,type),m1,area).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
attribute((encoding,binning),e1,20).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,y).
entity(scale,v,s1).
attribute((scale,channel),s1,y).
attribute((scale,type),s1,linear).
"""
)
== []
)
# continuous x, y
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
attribute((mark,type),m1,area).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,y).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,linear).
entity(scale,v,s2).
attribute((scale,channel),s2,y).
attribute((scale,type),s2,log).
"""
)
== [("c_c_area", "m1")]
)
def test_c_c_text():
b = soft.blocks["c_c_text"]
assert isinstance(b, Block)
# discrete x, y
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
attribute((mark,type),m1,text).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
attribute((encoding,binning),e1,20).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,y).
entity(scale,v,s1).
attribute((scale,channel),s1,y).
attribute((scale,type),s1,ordinal).
"""
)
== []
)
# continuous x, y
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
attribute((mark,type),m1,text).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,y).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,linear).
entity(scale,v,s2).
attribute((scale,channel),s2,y).
attribute((scale,type),s2,linear).
"""
)
== [("c_c_text", "m1")]
)
def test_c_d_point():
b = soft.blocks["c_d_point"]
assert isinstance(b, Block)
# only x encoding, discrete x
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
attribute((mark,type),m1,point).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
attribute((encoding,binning),e1,10).
"""
)
== []
)
# continuous x, discrete y
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
attribute((mark,type),m1,point).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,y).
attribute((encoding,binning),e2,10).
"""
)
== [("c_d_point", "m1")]
)
def test_c_d_bar():
b = soft.blocks["c_d_bar"]
assert isinstance(b, Block)
# continuous x, y
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
attribute((mark,type),m1,bar).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,y).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,linear).
entity(scale,v,s2).
attribute((scale,channel),s2,y).
attribute((scale,type),s2,linear).
"""
)
== []
)
# continuous y, discrete x
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
attribute((mark,type),m1,bar).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
attribute((encoding,binning),e1,10).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,y).
"""
)
== [("c_d_bar", "m1")]
)
def test_c_d_line():
b = soft.blocks["c_d_line"]
assert isinstance(b, Block)
# discrete x, continuous y, bar mark
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
attribute((mark,type),m1,bar).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
attribute((encoding,binning),e1,10).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,y).
"""
)
== []
)
# continuous y, discrete x
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
attribute((mark,type),m1,line).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
attribute((encoding,binning),e1,10).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,y).
"""
)
== [("c_d_line", "m1")]
)
def test_c_d_area():
b = soft.blocks["c_d_area"]
assert isinstance(b, Block)
# discrete x, y
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
attribute((mark,type),m1,area).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
attribute((encoding,binning),e1,10).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,y).
attribute((encoding,binning),e2,10).
"""
)
== []
)
# continuous y, discrete x
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
attribute((mark,type),m1,area).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,y).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,ordinal).
entity(scale,v,s2).
attribute((scale,channel),s2,y).
attribute((scale,type),s2,log).
"""
)
== [("c_d_area", "m1")]
)
def test_c_d_text():
b = soft.blocks["c_d_text"]
assert isinstance(b, Block)
# only y, continuous y
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
attribute((mark,type),m1,text).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
entity(scale,v,s1).
attribute((scale,channel),s1,y).
attribute((scale,type),s1,log).
"""
)
== []
)
# continuous y, discrete x
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
attribute((mark,type),m1,text).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,y).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,categorical).
entity(scale,v,s2).
attribute((scale,channel),s2,y).
attribute((scale,type),s2,linear).
"""
)
== [("c_d_text", "m1")]
)
def test_c_d_tick():
b = soft.blocks["c_d_tick"]
assert isinstance(b, Block)
# only y, discrete y, root scale
assert (
list_preferences(
b.program
+ """
entity(view,root,v).
entity(mark,v,m1).
attribute((mark,type),m1,tick).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
entity(scale,root,s1).
attribute((scale,channel),s1,y).
attribute((scale,type),s1,ordinal).
"""
)
== []
)
# continuous x, discrete y
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
attribute((mark,type),m1,tick).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,y).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,linear).
entity(scale,v,s2).
attribute((scale,channel),s2,y).
attribute((scale,type),s2,ordinal).
"""
)
== [("c_d_tick", "m1")]
)
def test_d_d_point():
b = soft.blocks["d_d_point"]
assert isinstance(b, Block)
# only x encoding, discrete x
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
attribute((mark,type),m1,point).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
attribute((encoding,binning),e1,10).
"""
)
== []
)
# discrete x, y
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
attribute((mark,type),m1,point).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
attribute((encoding,binning),e1,10).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,y).
attribute((encoding,binning),e2,10).
"""
)
== [("d_d_point", "m1")]
)
def test_d_d_text():
b = soft.blocks["d_d_text"]
assert isinstance(b, Block)
# discrete x, y, point mark
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
attribute((mark,type),m1,point).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
attribute((encoding,binning),e1,10).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,y).
attribute((encoding,binning),e2,10).
"""
)
== []
)
# discrete x, y
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
attribute((mark,type),m1,text).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
attribute((encoding,binning),e1,10).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,y).
entity(scale,v,s1).
attribute((scale,channel),s1,y).
attribute((scale,type),s1,ordinal).
"""
)
== [("d_d_text", "m1")]
)
def test_d_d_rect():
b = soft.blocks["d_d_rect"]
assert isinstance(b, Block)
# only y encoding, discrete y
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
attribute((mark,type),m1,rect).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
entity(scale,v,s1).
attribute((scale,channel),s1,y).
attribute((scale,type),s1,ordinal).
"""
)
== []
)
# discrete x, y, one root scale
assert (
list_preferences(
b.program
+ """
entity(view,root,v).
entity(mark,v,m1).
attribute((mark,type),m1,rect).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(encoding,m1,e2).
attribute((encoding,channel),e2,y).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,categorical).
entity(scale,root,s2).
attribute((scale,channel),s2,y).
attribute((scale,type),s2,ordinal).
"""
)
== [("d_d_rect", "m1")]
)
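# Scale/channel preferences: each <scale>_<channel> block (linear_x,
# log_y, ordinal_color, ...) fires when that channel uses that scale
# type, regardless of whether the scale sits on the view or on the root.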
def test_linear_x():
b = soft.blocks["linear_x"]
assert isinstance(b, Block)
# log x
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,log).
"""
)
== []
)
# scale on root
assert (
list_preferences(
b.program
+ """
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(scale,root,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,linear).
"""
)
== [("linear_x", "e1")]
)
def test_linear_y():
b = soft.blocks["linear_y"]
assert isinstance(b, Block)
# linear x
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,linear).
"""
)
== []
)
# scale on root
assert (
list_preferences(
b.program
+ """
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
entity(scale,root,s1).
attribute((scale,channel),s1,y).
attribute((scale,type),s1,linear).
"""
)
== [("linear_y", "e1")]
)
def test_linear_color():
b = soft.blocks["linear_color"]
assert isinstance(b, Block)
# ordinal color
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,color).
entity(scale,v,s1).
attribute((scale,channel),s1,color).
attribute((scale,type),s1,ordinal).
"""
)
== []
)
# scale on root
assert (
list_preferences(
b.program
+ """
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,color).
entity(scale,root,s1).
attribute((scale,channel),s1,color).
attribute((scale,type),s1,linear).
"""
)
== [("linear_color", "e1")]
)
def test_linear_size():
b = soft.blocks["linear_size"]
assert isinstance(b, Block)
# log size
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,size).
entity(scale,v,s1).
attribute((scale,channel),s1,size).
attribute((scale,type),s1,log).
"""
)
== []
)
# scale on root
assert (
list_preferences(
b.program
+ """
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,size).
entity(scale,root,s1).
attribute((scale,channel),s1,size).
attribute((scale,type),s1,linear).
"""
)
== [("linear_size", "e1")]
)
def test_linear_text():
b = soft.blocks["linear_text"]
assert isinstance(b, Block)
# log text
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,text).
entity(scale,v,s1).
attribute((scale,channel),s1,text).
attribute((scale,type),s1,log).
"""
)
== []
)
# scale on root
assert (
list_preferences(
b.program
+ """
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,text).
entity(scale,root,s1).
attribute((scale,channel),s1,text).
attribute((scale,type),s1,linear).
"""
)
== [("linear_text", "e1")]
)
def test_log_x():
b = soft.blocks["log_x"]
assert isinstance(b, Block)
# linear x
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,linear).
"""
)
== []
)
# scale on root
assert (
list_preferences(
b.program
+ """
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(scale,root,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,log).
"""
)
== [("log_x", "e1")]
)
def test_log_y():
b = soft.blocks["log_y"]
assert isinstance(b, Block)
# ordinal y
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
entity(scale,v,s1).
attribute((scale,channel),s1,y).
attribute((scale,type),s1,ordinal).
"""
)
== []
)
# scale on root
assert (
list_preferences(
b.program
+ """
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
entity(scale,root,s1).
attribute((scale,channel),s1,y).
attribute((scale,type),s1,log).
"""
)
== [("log_y", "e1")]
)
def test_log_color():
b = soft.blocks["log_color"]
assert isinstance(b, Block)
# linear color
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,color).
entity(scale,v,s1).
attribute((scale,channel),s1,color).
attribute((scale,type),s1,linear).
"""
)
== []
)
# scale on root
assert (
list_preferences(
b.program
+ """
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,color).
entity(scale,root,s1).
attribute((scale,channel),s1,color).
attribute((scale,type),s1,log).
"""
)
== [("log_color", "e1")]
)
def test_log_size():
b = soft.blocks["log_size"]
assert isinstance(b, Block)
# log color
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,color).
entity(scale,v,s1).
attribute((scale,channel),s1,color).
attribute((scale,type),s1,log).
"""
)
== []
)
# scale on root
assert (
list_preferences(
b.program
+ """
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,size).
entity(scale,root,s1).
attribute((scale,channel),s1,size).
attribute((scale,type),s1,log).
"""
)
== [("log_size", "e1")]
)
def test_log_text():
b = soft.blocks["log_text"]
assert isinstance(b, Block)
# ordinal text
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,text).
entity(scale,v,s1).
attribute((scale,channel),s1,text).
attribute((scale,type),s1,ordinal).
"""
)
== []
)
# scale on root
assert (
list_preferences(
b.program
+ """
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,text).
entity(scale,root,s1).
attribute((scale,channel),s1,text).
attribute((scale,type),s1,log).
"""
)
== [("log_text", "e1")]
)
def test_ordinal_x():
b = soft.blocks["ordinal_x"]
assert isinstance(b, Block)
# scale on root, linear x
assert (
list_preferences(
b.program
+ """
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(scale,root,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,linear).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,ordinal).
"""
)
== [("ordinal_x", "e1")]
)
def test_ordinal_y():
b = soft.blocks["ordinal_y"]
assert isinstance(b, Block)
# scale on root, log y
assert (
list_preferences(
b.program
+ """
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
entity(scale,root,s1).
attribute((scale,channel),s1,y).
attribute((scale,type),s1,log).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
entity(scale,v,s1).
attribute((scale,channel),s1,y).
attribute((scale,type),s1,ordinal).
"""
)
== [("ordinal_y", "e1")]
)
def test_ordinal_color():
b = soft.blocks["ordinal_color"]
assert isinstance(b, Block)
# scale on root, linear color
assert (
list_preferences(
b.program
+ """
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,color).
entity(scale,root,s1).
attribute((scale,channel),s1,color).
attribute((scale,type),s1,linear).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,color).
entity(scale,v,s1).
attribute((scale,channel),s1,color).
attribute((scale,type),s1,ordinal).
"""
)
== [("ordinal_color", "e1")]
)
def test_ordinal_size():
b = soft.blocks["ordinal_size"]
assert isinstance(b, Block)
# scale on root, ordinal color
assert (
list_preferences(
b.program
+ """
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,color).
entity(scale,root,s1).
attribute((scale,channel),s1,color).
attribute((scale,type),s1,ordinal).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,size).
entity(scale,v,s1).
attribute((scale,channel),s1,size).
attribute((scale,type),s1,ordinal).
"""
)
== [("ordinal_size", "e1")]
)
def test_ordinal_shape():
b = soft.blocks["ordinal_shape"]
assert isinstance(b, Block)
# scale on root, ordinal size
assert (
list_preferences(
b.program
+ """
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,size).
entity(scale,root,s1).
attribute((scale,channel),s1,size).
attribute((scale,type),s1,ordinal).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,shape).
entity(scale,v,s1).
attribute((scale,channel),s1,shape).
attribute((scale,type),s1,ordinal).
"""
)
== [("ordinal_shape", "e1")]
)
def test_ordinal_text():
b = soft.blocks["ordinal_text"]
assert isinstance(b, Block)
# scale on root, linear text
assert (
list_preferences(
b.program
+ """
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,text).
entity(scale,root,s1).
attribute((scale,channel),s1,text).
attribute((scale,type),s1,linear).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,text).
entity(scale,v,s1).
attribute((scale,channel),s1,text).
attribute((scale,type),s1,ordinal).
"""
)
== [("ordinal_text", "e1")]
)
def test_ordinal_detail():
b = soft.blocks["ordinal_detail"]
assert isinstance(b, Block)
# scale on root, ordinal x
assert (
list_preferences(
b.program
+ """
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(scale,root,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,ordinal).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,detail).
entity(scale,v,s1).
attribute((scale,channel),s1,detail).
attribute((scale,type),s1,ordinal).
"""
)
== [("ordinal_detail", "e1")]
)
def test_categorical_color():
b = soft.blocks["categorical_color"]
assert isinstance(b, Block)
# scale on root, linear color
assert (
list_preferences(
b.program
+ """
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,color).
entity(scale,root,s1).
attribute((scale,channel),s1,color).
attribute((scale,type),s1,linear).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,color).
entity(scale,v,s1).
attribute((scale,channel),s1,color).
attribute((scale,type),s1,categorical).
"""
)
== [("categorical_color", "e1")]
)
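# Aggregation preferences: each aggregate_<fn> block fires exactly when
# that aggregate function is applied to an encoding.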
def test_aggregate_count():
b = soft.blocks["aggregate_count"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
attribute((encoding,aggregate),e1,count).
"""
)
== [("aggregate_count", "e1")]
)
def test_aggregate_mean():
b = soft.blocks["aggregate_mean"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
attribute((encoding,aggregate),e1,count).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
attribute((encoding,aggregate),e1,mean).
"""
)
== [("aggregate_mean", "e1")]
)
def test_aggregate_median():
b = soft.blocks["aggregate_median"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
attribute((encoding,aggregate),e1,min).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
attribute((encoding,aggregate),e1,median).
"""
)
== [("aggregate_median", "e1")]
)
def test_aggregate_min():
b = soft.blocks["aggregate_min"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
attribute((encoding,aggregate),e1,max).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
attribute((encoding,aggregate),e1,min).
"""
)
== [("aggregate_min", "e1")]
)
def test_aggregate_max():
b = soft.blocks["aggregate_max"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
attribute((encoding,aggregate),e1,min).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
attribute((encoding,aggregate),e1,max).
"""
)
== [("aggregate_max", "e1")]
)
def test_aggregate_stdev():
b = soft.blocks["aggregate_stdev"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
attribute((encoding,aggregate),e1,mean).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
attribute((encoding,aggregate),e1,stdev).
"""
)
== [("aggregate_stdev", "e1")]
)
def test_aggregate_sum():
b = soft.blocks["aggregate_sum"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
attribute((encoding,aggregate),e1,count).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
attribute((encoding,aggregate),e1,sum).
"""
)
== [("aggregate_sum", "e1")]
)
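# Stacking preferences mirror the aggregation ones: stack_zero,
# stack_center, and stack_normalize each fire for their own stack mode.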
def test_stack_zero():
b = soft.blocks["stack_zero"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
attribute((encoding,aggregate),e1,count).
attribute((encoding,stack),e1,zero).
"""
)
== [("stack_zero", "e1")]
)
def test_stack_center():
b = soft.blocks["stack_center"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
attribute((encoding,aggregate),e1,count).
attribute((encoding,stack),e1,zero).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
attribute((encoding,aggregate),e1,count).
attribute((encoding,stack),e1,center).
"""
)
== [("stack_center", "e1")]
)
def test_stack_normalize():
b = soft.blocks["stack_normalize"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
attribute((encoding,aggregate),e1,count).
attribute((encoding,stack),e1,center).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
attribute((encoding,aggregate),e1,count).
attribute((encoding,stack),e1,normalize).
"""
)
== [("stack_normalize", "e1")]
)
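# Task/mark preferences: value_<mark> fires when <mark> appears under a
# "value" task and summary_<mark> under a "summary" task, once per mark.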
def test_value_point():
b = soft.blocks["value_point"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(mark,v,m1).
attribute((mark,type),m1,line).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(mark,v,m1).
attribute((mark,type),m1,point).
"""
)
== [("value_point", "m1")]
)
def test_value_bar():
b = soft.blocks["value_bar"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(mark,v,m1).
attribute((mark,type),m1,bar).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(mark,v,m1).
attribute((mark,type),m1,bar).
entity(mark,v,m2).
attribute((mark,type),m2,bar).
"""
)
== [("value_bar", "m1"), ("value_bar", "m2")]
)
def test_value_line():
b = soft.blocks["value_line"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(mark,v,m1).
attribute((mark,type),m1,line).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(mark,v,m1).
attribute((mark,type),m1,line).
"""
)
== [("value_line", "m1")]
)
def test_value_area():
b = soft.blocks["value_area"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(mark,v,m1).
attribute((mark,type),m1,bar).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(mark,v,m1).
attribute((mark,type),m1,area).
"""
)
== [("value_area", "m1")]
)
def test_value_text():
b = soft.blocks["value_text"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(mark,v,m1).
attribute((mark,type),m1,tick).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(mark,v,m1).
attribute((mark,type),m1,text).
"""
)
== [("value_text", "m1")]
)
def test_value_tick():
b = soft.blocks["value_tick"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(mark,v,m1).
attribute((mark,type),m1,point).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(mark,v,m1).
attribute((mark,type),m1,tick).
"""
)
== [("value_tick", "m1")]
)
def test_value_rect():
b = soft.blocks["value_rect"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(mark,v,m1).
attribute((mark,type),m1,area).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(mark,v,m1).
attribute((mark,type),m1,rect).
entity(mark,v,m2).
attribute((mark,type),m2,rect).
"""
)
== [("value_rect", "m1"), ("value_rect", "m2")]
)
def test_summary_point():
b = soft.blocks["summary_point"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(mark,v,m1).
attribute((mark,type),m1,point).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(mark,v,m1).
attribute((mark,type),m1,point).
"""
)
== [("summary_point", "m1")]
)
def test_summary_bar():
b = soft.blocks["summary_bar"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(mark,v,m1).
attribute((mark,type),m1,bar).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(mark,v,m1).
attribute((mark,type),m1,bar).
entity(mark,v,m2).
attribute((mark,type),m2,bar).
"""
)
== [("summary_bar", "m1"), ("summary_bar", "m2")]
)
def test_summary_line():
b = soft.blocks["summary_line"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(mark,v,m1).
attribute((mark,type),m1,bar).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(mark,v,m1).
attribute((mark,type),m1,line).
"""
)
== [("summary_line", "m1")]
)
def test_summary_area():
b = soft.blocks["summary_area"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(mark,v,m1).
attribute((mark,type),m1,bar).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(mark,v,m1).
attribute((mark,type),m1,area).
"""
)
== [("summary_area", "m1")]
)
def test_summary_text():
b = soft.blocks["summary_text"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(mark,v,m1).
attribute((mark,type),m1,tick).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(mark,v,m1).
attribute((mark,type),m1,text).
"""
)
== [("summary_text", "m1")]
)
def test_summary_tick():
b = soft.blocks["summary_tick"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(mark,v,m1).
attribute((mark,type),m1,point).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(mark,v,m1).
attribute((mark,type),m1,tick).
"""
)
== [("summary_tick", "m1")]
)
def test_summary_rect():
b = soft.blocks["summary_rect"]
assert isinstance(b, Block)
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(mark,v,m1).
attribute((mark,type),m1,area).
"""
)
== []
)
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(mark,v,m1).
attribute((mark,type),m1,rect).
entity(mark,v,m2).
attribute((mark,type),m2,rect).
"""
)
== [("summary_rect", "m1"), ("summary_rect", "m2")]
)
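# Task/encoding preferences: value_continuous_<channel> and
# value_discrete_<channel> fire when the channel is continuous (resp.
# discrete) under a "value" task.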
def test_value_continuous_x():
b = soft.blocks["value_continuous_x"]
assert isinstance(b, Block)
# discrete x
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
attribute((encoding,binning),e1,10).
"""
)
== []
)
# continuous x
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,linear).
"""
)
== [("value_continuous_x", "e1")]
)
def test_value_continuous_y():
b = soft.blocks["value_continuous_y"]
assert isinstance(b, Block)
# discrete y
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
attribute((encoding,binning),e1,10).
"""
)
== []
)
# continuous y
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
entity(scale,v,s1).
attribute((scale,channel),s1,y).
attribute((scale,type),s1,log).
"""
)
== [("value_continuous_y", "e1")]
)
def test_value_continuous_color():
b = soft.blocks["value_continuous_color"]
assert isinstance(b, Block)
# discrete color, scale on root
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,color).
entity(scale,root,s1).
attribute((scale,channel),s1,color).
attribute((scale,type),s1,categorical).
"""
)
== []
)
# continuous color
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,color).
entity(scale,v,s1).
attribute((scale,channel),s1,color).
attribute((scale,type),s1,linear).
"""
)
== [("value_continuous_color", "e1")]
)
def test_value_continuous_size():
b = soft.blocks["value_continuous_size"]
assert isinstance(b, Block)
# discrete size
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,size).
attribute((encoding,binning),e1,10).
"""
)
== []
)
# continuous size
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,size).
entity(scale,v,s1).
attribute((scale,channel),s1,size).
attribute((scale,type),s1,linear).
"""
)
== [("value_continuous_size", "e1")]
)
def test_value_continuous_text():
b = soft.blocks["value_continuous_text"]
assert isinstance(b, Block)
# continuous text, summary task
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,text).
"""
)
== []
)
# continuous text
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,text).
entity(scale,v,s1).
attribute((scale,channel),s1,text).
attribute((scale,type),s1,linear).
"""
)
== [("value_continuous_text", "e1")]
)
def test_value_discrete_x():
b = soft.blocks["value_discrete_x"]
assert isinstance(b, Block)
# continuous x
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
"""
)
== []
)
# discrete x
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
attribute((encoding,binning),e1,20).
"""
)
== [("value_discrete_x", "e1")]
)
def test_value_discrete_y():
b = soft.blocks["value_discrete_y"]
assert isinstance(b, Block)
# continuous y
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
"""
)
== []
)
# discrete y
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
attribute((encoding,binning),e1,20).
"""
)
== [("value_discrete_y", "e1")]
)
def test_value_discrete_color():
b = soft.blocks["value_discrete_color"]
assert isinstance(b, Block)
# discrete color, summary task
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,color).
entity(scale,v,s1).
attribute((scale,channel),s1,color).
attribute((scale,type),s1,categorical).
"""
)
== []
)
# discrete color, scale on root
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,color).
entity(scale,root,s1).
attribute((scale,channel),s1,color).
attribute((scale,type),s1,ordinal).
"""
)
== [("value_discrete_color", "e1")]
)
def test_value_discrete_size():
b = soft.blocks["value_discrete_size"]
assert isinstance(b, Block)
# continuous size
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,size).
entity(scale,v,s1).
attribute((scale,channel),s1,size).
attribute((scale,type),s1,linear).
"""
)
== []
)
# discrete size, scale on root
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,size).
entity(scale,root,s1).
attribute((scale,channel),s1,size).
attribute((scale,type),s1,ordinal).
"""
)
== [("value_discrete_size", "e1")]
)
def test_value_discrete_shape():
b = soft.blocks["value_discrete_shape"]
assert isinstance(b, Block)
# discrete shape, summary task
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,shape).
entity(scale,v,s1).
attribute((scale,channel),s1,shape).
attribute((scale,type),s1,ordinal).
"""
)
== []
)
# discrete shape, scale on root
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,shape).
entity(scale,root,s1).
attribute((scale,channel),s1,shape).
attribute((scale,type),s1,ordinal).
"""
)
== [("value_discrete_shape", "e1")]
)
def test_value_discrete_text():
b = soft.blocks["value_discrete_text"]
assert isinstance(b, Block)
# continuous text
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,text).
"""
)
== []
)
# discrete text
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,text).
attribute((encoding,binning),e1,10).
"""
)
== [("value_discrete_text", "e1")]
)
def test_value_discrete_detail():
b = soft.blocks["value_discrete_detail"]
assert isinstance(b, Block)
# discrete detail, summary task
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,detail).
entity(scale,v,s1).
attribute((scale,channel),s1,detail).
attribute((scale,type),s1,ordinal).
"""
)
== []
)
# discrete detail, scale on root
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,detail).
entity(scale,root,s1).
attribute((scale,channel),s1,detail).
attribute((scale,type),s1,ordinal).
"""
)
== [("value_discrete_detail", "e1")]
)
def test_summary_continuous_x():
b = soft.blocks["summary_continuous_x"]
assert isinstance(b, Block)
# discrete x
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
attribute((encoding,binning),e1,10).
"""
)
== []
)
# continuous x
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
entity(scale,v,s1).
attribute((scale,channel),s1,x).
attribute((scale,type),s1,linear).
"""
)
== [("summary_continuous_x", "e1")]
)
def test_summary_continuous_y():
b = soft.blocks["summary_continuous_y"]
assert isinstance(b, Block)
# discrete y
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
attribute((encoding,binning),e1,10).
"""
)
== []
)
# continuous y
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
entity(scale,v,s1).
attribute((scale,channel),s1,y).
attribute((scale,type),s1,log).
"""
)
== [("summary_continuous_y", "e1")]
)
def test_summary_continuous_color():
b = soft.blocks["summary_continuous_color"]
assert isinstance(b, Block)
# discrete color, scale on root
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,color).
entity(scale,root,s1).
attribute((scale,channel),s1,color).
attribute((scale,type),s1,categorical).
"""
)
== []
)
# continuous color
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,color).
entity(scale,v,s1).
attribute((scale,channel),s1,color).
attribute((scale,type),s1,linear).
"""
)
== [("summary_continuous_color", "e1")]
)
def test_summary_continuous_size():
b = soft.blocks["summary_continuous_size"]
assert isinstance(b, Block)
# discrete size
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,size).
attribute((encoding,binning),e1,10).
"""
)
== []
)
# continuous size
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,size).
entity(scale,v,s1).
attribute((scale,channel),s1,size).
attribute((scale,type),s1,linear).
"""
)
== [("summary_continuous_size", "e1")]
)
def test_summary_continuous_text():
b = soft.blocks["summary_continuous_text"]
assert isinstance(b, Block)
# continuous text, value task
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,text).
"""
)
== []
)
# continuous text
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,text).
entity(scale,v,s1).
attribute((scale,channel),s1,text).
attribute((scale,type),s1,linear).
"""
)
== [("summary_continuous_text", "e1")]
)
def test_summary_discrete_x():
b = soft.blocks["summary_discrete_x"]
assert isinstance(b, Block)
# continuous x
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
"""
)
== []
)
# discrete x
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,x).
attribute((encoding,binning),e1,20).
"""
)
== [("summary_discrete_x", "e1")]
)
def test_summary_discrete_y():
b = soft.blocks["summary_discrete_y"]
assert isinstance(b, Block)
# continuous y
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
"""
)
== []
)
# discrete y
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,y).
attribute((encoding,binning),e1,20).
"""
)
== [("summary_discrete_y", "e1")]
)
def test_summary_discrete_color():
b = soft.blocks["summary_discrete_color"]
assert isinstance(b, Block)
# discrete color, value task
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,color).
entity(scale,v,s1).
attribute((scale,channel),s1,color).
attribute((scale,type),s1,categorical).
"""
)
== []
)
# discrete color, scale on root
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,color).
entity(scale,root,s1).
attribute((scale,channel),s1,color).
attribute((scale,type),s1,ordinal).
"""
)
== [("summary_discrete_color", "e1")]
)
def test_summary_discrete_size():
b = soft.blocks["summary_discrete_size"]
assert isinstance(b, Block)
# continuous size
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,size).
entity(scale,v,s1).
attribute((scale,channel),s1,size).
attribute((scale,type),s1,linear).
"""
)
== []
)
# discrete size, scale on root
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,size).
entity(scale,root,s1).
attribute((scale,channel),s1,size).
attribute((scale,type),s1,ordinal).
"""
)
== [("summary_discrete_size", "e1")]
)
def test_summary_discrete_shape():
b = soft.blocks["summary_discrete_shape"]
assert isinstance(b, Block)
# discrete shape, value task
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,shape).
entity(scale,v,s1).
attribute((scale,channel),s1,shape).
attribute((scale,type),s1,ordinal).
"""
)
== []
)
# discrete shape, scale on root
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,shape).
entity(scale,root,s1).
attribute((scale,channel),s1,shape).
attribute((scale,type),s1,ordinal).
"""
)
== [("summary_discrete_shape", "e1")]
)
def test_summary_discrete_text():
b = soft.blocks["summary_discrete_text"]
assert isinstance(b, Block)
# continuous text
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,text).
"""
)
== []
)
# discrete text
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,text).
attribute((encoding,binning),e1,10).
"""
)
== [("summary_discrete_text", "e1")]
)
def test_summary_discrete_detail():
b = soft.blocks["summary_discrete_detail"]
assert isinstance(b, Block)
# discrete detail, value task
assert (
list_preferences(
b.program
+ """
attribute(task,root,value).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,detail).
entity(scale,v,s1).
attribute((scale,channel),s1,detail).
attribute((scale,type),s1,ordinal).
"""
)
== []
)
# discrete detail, scale on root
assert (
list_preferences(
b.program
+ """
attribute(task,root,summary).
entity(view,root,v).
entity(mark,v,m1).
entity(encoding,m1,e1).
attribute((encoding,channel),e1,detail).
entity(scale,root,s1).
attribute((scale,channel),s1,detail).
attribute((scale,type),s1,ordinal).
"""
)
== [("summary_discrete_detail", "e1")]
)
| 20.685927
| 85
| 0.537953
| 13,264
| 122,440
| 4.885781
| 0.015305
| 0.122768
| 0.099159
| 0.103541
| 0.918695
| 0.876074
| 0.863159
| 0.853221
| 0.840568
| 0.831448
| 0
| 0.027119
| 0.295876
| 122,440
| 5,918
| 86
| 20.689422
| 0.724566
| 0.033371
| 0
| 0.777895
| 0
| 0
| 0.592238
| 0.395322
| 0
| 0
| 0
| 0
| 0.091542
| 1
| 0.02742
| false
| 0
| 0.000633
| 0
| 0.028475
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1392beb94de7b5e3cb431781e21b51e2411ff6d6
| 201
|
py
|
Python
|
models/__init__.py
|
bk-m/release-ink
|
ac25f5f1fe8b85c7d1f4b7cab2c68a1139c7d823
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
bk-m/release-ink
|
ac25f5f1fe8b85c7d1f4b7cab2c68a1139c7d823
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
bk-m/release-ink
|
ac25f5f1fe8b85c7d1f4b7cab2c68a1139c7d823
|
[
"MIT"
] | null | null | null |
import marshmallow_dataclass
from .Config import Config
from .Entry import Entry
ConfigSchema = marshmallow_dataclass.class_schema(Config)()
EntrySchema = marshmallow_dataclass.class_schema(Entry)()
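# Hypothetical usage sketch (the payload fields below are assumptions, not
# taken from the Config/Entry definitions):
#   config = ConfigSchema.load({"name": "default"})  # dict -> Config instance
#   payload = EntrySchema.dump(entry)                # Entry instance -> dict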
| 25.125
| 59
| 0.840796
| 23
| 201
| 7.130435
| 0.434783
| 0.365854
| 0.304878
| 0.378049
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089552
| 201
| 7
| 60
| 28.714286
| 0.896175
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
13bd225058ab502b1e65a545f3c03469b4369568
| 40,160
|
py
|
Python
|
tests/testflows/ldap/authentication/tests/authentications.py
|
pdv-ru/ClickHouse
|
0ff975bcf3008fa6c6373cbdfed16328e3863ec5
|
[
"Apache-2.0"
] | 15,577
|
2019-09-23T11:57:53.000Z
|
2022-03-31T18:21:48.000Z
|
tests/testflows/ldap/authentication/tests/authentications.py
|
pdv-ru/ClickHouse
|
0ff975bcf3008fa6c6373cbdfed16328e3863ec5
|
[
"Apache-2.0"
] | 16,476
|
2019-09-23T11:47:00.000Z
|
2022-03-31T23:06:01.000Z
|
tests/testflows/ldap/authentication/tests/authentications.py
|
pdv-ru/ClickHouse
|
0ff975bcf3008fa6c6373cbdfed16328e3863ec5
|
[
"Apache-2.0"
] | 3,633
|
2019-09-23T12:18:28.000Z
|
2022-03-31T15:55:48.000Z
|
# -*- coding: utf-8 -*-
import random
import time
from helpers.common import Pool, join
from testflows.core import *
from testflows.asserts import error
from ldap.authentication.tests.common import *
from ldap.authentication.requirements import *
servers = {
"openldap1": {
"host": "openldap1",
"port": "389",
"enable_tls": "no",
"auth_dn_prefix": "cn=",
"auth_dn_suffix": ",ou=users,dc=company,dc=com"
},
"openldap2": {
"host": "openldap2",
"port": "636",
"enable_tls": "yes",
"auth_dn_prefix": "cn=",
"auth_dn_suffix": ",ou=users,dc=company,dc=com",
"tls_require_cert": "never",
}
}
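# Two LDAP servers are defined for the tests: openldap1 accepts plain
# connections on port 389, while openldap2 requires TLS on port 636 with
# certificate verification disabled (tls_require_cert: never).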
@TestStep(When)
@Name("I login as {username} and execute query")
@Args(format_name=True)
def login_and_execute_query(self, username, password, exitcode=None, message=None, steps=True):
"""Execute query as some user.
"""
self.context.node.query("SELECT 1",
settings=[("user", username), ("password", password)],
exitcode=exitcode or 0,
message=message, steps=steps)
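# Typical calls (a sketch; exitcode/message are only passed when a failure
# is expected):
#   login_and_execute_query(username="myuser", password="myuser")
#   login_and_execute_query(username="myuser", password="wrong",
#       exitcode=4, message="DB::Exception: ...")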
@TestScenario
def add_user_to_ldap_and_login(self, server, user=None, ch_user=None, login=None, exitcode=None, message=None, rbac=False):
"""Add user to LDAP and ClickHouse and then try to login.
"""
self.context.ldap_node = self.context.cluster.node(server)
if ch_user is None:
ch_user = {}
if login is None:
login = {}
if user is None:
user = {"cn": "myuser", "userpassword": "myuser"}
with ldap_user(**user) as user:
ch_user["username"] = ch_user.get("username", user["cn"])
ch_user["server"] = ch_user.get("server", user["_server"])
with ldap_authenticated_users(ch_user, config_file=f"ldap_users_{getuid()}.xml", restart=True, rbac=rbac):
username = login.get("username", user["cn"])
password = login.get("password", user["userpassword"])
login_and_execute_query(username=username, password=password, exitcode=exitcode, message=message)
@TestScenario
@Requirements(
RQ_SRS_007_LDAP_Authentication_Parallel("1.0"),
RQ_SRS_007_LDAP_Authentication_Parallel_ValidAndInvalid("1.0")
)
def parallel_login(self, server, user_count=10, timeout=300, rbac=False):
"""Check that login of valid and invalid LDAP authenticated users works in parallel.
"""
self.context.ldap_node = self.context.cluster.node(server)
user = None
users = [{"cn": f"parallel_user{i}", "userpassword": randomword(20)} for i in range(user_count)]
with ldap_users(*users):
with ldap_authenticated_users(*[{"username": user["cn"], "server": server} for user in users], rbac=rbac):
def login_with_valid_username_and_password(users, i, iterations=10):
with When(f"valid users try to login #{i}"):
for i in range(iterations):
random_user = users[random.randint(0, len(users)-1)]
login_and_execute_query(username=random_user["cn"], password=random_user["userpassword"], steps=False)
def login_with_valid_username_and_invalid_password(users, i, iterations=10):
with When(f"users try to login with valid username and invalid password #{i}"):
for i in range(iterations):
random_user = users[random.randint(0, len(users)-1)]
login_and_execute_query(username=random_user["cn"],
password=(random_user["userpassword"] + randomword(1)),
exitcode=4,
message=f"DB::Exception: {random_user['cn']}: Authentication failed: password is incorrect or there is no user with such name",
steps=False)
def login_with_invalid_username_and_valid_password(users, i, iterations=10):
with When(f"users try to login with invalid username and valid password #{i}"):
for i in range(iterations):
random_user = dict(users[random.randint(0, len(users)-1)])
random_user["cn"] += randomword(1)
login_and_execute_query(username=random_user["cn"],
password=random_user["userpassword"],
exitcode=4,
message=f"DB::Exception: {random_user['cn']}: Authentication failed: password is incorrect or there is no user with such name",
steps=False)
with When("I login in parallel"):
tasks = []
with Pool(4) as pool:
try:
for i in range(5):
tasks.append(pool.apply_async(login_with_valid_username_and_password, (users, i, 50,)))
tasks.append(pool.apply_async(login_with_valid_username_and_invalid_password, (users, i, 50,)))
tasks.append(pool.apply_async(login_with_invalid_username_and_valid_password, (users, i, 50,)))
finally:
with Then("it should work"):
join(tasks, timeout=timeout)
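# Design note: the three login closures (valid/valid, valid/invalid,
# invalid/valid) are each scheduled 5 times with 50 iterations, giving
# 15 tasks multiplexed over a pool of 4 workers; join() then waits for
# all of them within the scenario timeout.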
@TestScenario
@Requirements(
RQ_SRS_007_LDAP_Authentication_Invalid("1.0"),
RQ_SRS_007_LDAP_Authentication_Invalid_DeletedUser("1.0")
)
def login_after_user_is_deleted_from_ldap(self, server, rbac=False):
"""Check that login fails after user is deleted from LDAP.
"""
self.context.ldap_node = self.context.cluster.node(server)
user = None
try:
with Given(f"I add user to LDAP"):
user = {"cn": "myuser", "userpassword": "myuser"}
user = add_user_to_ldap(**user)
with ldap_authenticated_users({"username": user["cn"], "server": server}, config_file=f"ldap_users_{getuid()}.xml",
restart=True, rbac=rbac):
login_and_execute_query(username=user["cn"], password=user["userpassword"])
with When("I delete this user from LDAP"):
delete_user_from_ldap(user)
with Then("when I try to login again it should fail"):
login_and_execute_query(username=user["cn"], password=user["userpassword"],
exitcode=4,
message=f"DB::Exception: {user['cn']}: Authentication failed: password is incorrect or there is no user with such name"
)
finally:
with Finally("I make sure LDAP user is deleted"):
if user is not None:
delete_user_from_ldap(user, exitcode=None)
@TestScenario
@Requirements(
RQ_SRS_007_LDAP_Authentication_Invalid("1.0"),
RQ_SRS_007_LDAP_Authentication_PasswordChanged("1.0")
)
def login_after_user_password_changed_in_ldap(self, server, rbac=False):
"""Check that login fails after user password is changed in LDAP.
"""
self.context.ldap_node = self.context.cluster.node(server)
user = None
try:
with Given(f"I add user to LDAP"):
user = {"cn": "myuser", "userpassword": "myuser"}
user = add_user_to_ldap(**user)
with ldap_authenticated_users({"username": user["cn"], "server": server}, config_file=f"ldap_users_{getuid()}.xml",
restart=True, rbac=rbac):
login_and_execute_query(username=user["cn"], password=user["userpassword"])
with When("I change user password in LDAP"):
change_user_password_in_ldap(user, "newpassword")
with Then("when I try to login again it should fail"):
login_and_execute_query(username=user["cn"], password=user["userpassword"],
exitcode=4,
message=f"DB::Exception: {user['cn']}: Authentication failed: password is incorrect or there is no user with such name"
)
with And("when I try to login with the new password it should work"):
login_and_execute_query(username=user["cn"], password="newpassword")
finally:
with Finally("I make sure LDAP user is deleted"):
if user is not None:
delete_user_from_ldap(user, exitcode=None)
@TestScenario
@Requirements(
RQ_SRS_007_LDAP_Authentication_Invalid("1.0"),
RQ_SRS_007_LDAP_Authentication_UsernameChanged("1.0")
)
def login_after_user_cn_changed_in_ldap(self, server, rbac=False):
"""Check that login fails after user cn is changed in LDAP.
"""
self.context.ldap_node = self.context.cluster.node(server)
user = None
new_user = None
try:
with Given(f"I add user to LDAP"):
user = {"cn": "myuser", "userpassword": "myuser"}
user = add_user_to_ldap(**user)
with ldap_authenticated_users({"username": user["cn"], "server": server},
config_file=f"ldap_users_{getuid()}.xml", restart=True, rbac=rbac):
login_and_execute_query(username=user["cn"], password=user["userpassword"])
with When("I change user password in LDAP"):
new_user = change_user_cn_in_ldap(user, "myuser2")
with Then("when I try to login again it should fail"):
login_and_execute_query(username=user["cn"], password=user["userpassword"],
exitcode=4,
message=f"DB::Exception: {user['cn']}: Authentication failed: password is incorrect or there is no user with such name"
)
finally:
with Finally("I make sure LDAP user is deleted"):
if new_user is not None:
delete_user_from_ldap(new_user, exitcode=None)
@TestScenario
@Requirements(
RQ_SRS_007_LDAP_Authentication_Valid("1.0"),
RQ_SRS_007_LDAP_Authentication_LDAPServerRestart("1.0")
)
def login_after_ldap_server_is_restarted(self, server, timeout=300, rbac=False):
"""Check that login succeeds after LDAP server is restarted.
"""
self.context.ldap_node = self.context.cluster.node(server)
user = None
try:
with Given(f"I add user to LDAP"):
user = {"cn": "myuser", "userpassword": getuid()}
user = add_user_to_ldap(**user)
with ldap_authenticated_users({"username": user["cn"], "server": server}, rbac=rbac):
login_and_execute_query(username=user["cn"], password=user["userpassword"])
with When("I restart LDAP server"):
self.context.ldap_node.restart()
with Then("I try to login until it works", description=f"timeout {timeout} sec"):
started = time.time()
while True:
r = self.context.node.query("SELECT 1",
settings=[("user", user["cn"]), ("password", user["userpassword"])],
no_checks=True)
if r.exitcode == 0:
break
assert time.time() - started < timeout, error(r.output)
finally:
with Finally("I make sure LDAP user is deleted"):
if user is not None:
delete_user_from_ldap(user, exitcode=None)
@TestScenario
@Requirements(
RQ_SRS_007_LDAP_Authentication_Valid("1.0"),
RQ_SRS_007_LDAP_Authentication_ClickHouseServerRestart("1.0")
)
def login_after_clickhouse_server_is_restarted(self, server, timeout=300, rbac=False):
"""Check that login succeeds after ClickHouse server is restarted.
"""
self.context.ldap_node = self.context.cluster.node(server)
user = None
try:
with Given(f"I add user to LDAP"):
user = {"cn": "myuser", "userpassword": getuid()}
user = add_user_to_ldap(**user)
with ldap_authenticated_users({"username": user["cn"], "server": server}, rbac=rbac):
login_and_execute_query(username=user["cn"], password=user["userpassword"])
with When("I restart ClickHouse server"):
self.context.node.restart()
with Then("I try to login until it works", description=f"timeout {timeout} sec"):
started = time.time()
while True:
r = self.context.node.query("SELECT 1",
settings=[("user", user["cn"]), ("password", user["userpassword"])],
no_checks=True)
if r.exitcode == 0:
break
assert time.time() - started < timeout, error(r.output)
finally:
with Finally("I make sure LDAP user is deleted"):
if user is not None:
delete_user_from_ldap(user, exitcode=None)
@TestScenario
@Requirements(
RQ_SRS_007_LDAP_Authentication_Invalid("1.0"),
RQ_SRS_007_LDAP_Authentication_Password_Empty("1.0")
)
def valid_username_with_valid_empty_password(self, server, rbac=False):
"""Check that we can't login using valid username that has empty password.
"""
user = {"cn": "empty_password", "userpassword": ""}
exitcode = 4
message = f"DB::Exception: {user['cn']}: Authentication failed: password is incorrect or there is no user with such name"
add_user_to_ldap_and_login(user=user, exitcode=exitcode, message=message, server=server, rbac=rbac)
@TestScenario
@Requirements(
RQ_SRS_007_LDAP_Authentication_Invalid("1.0"),
RQ_SRS_007_LDAP_Authentication_Password_Empty("1.0")
)
def valid_username_and_invalid_empty_password(self, server, rbac=False):
"""Check that we can't login using valid username but invalid empty password.
"""
username = "user_non_empty_password"
user = {"cn": username, "userpassword": username}
login = {"password": ""}
exitcode = 4
message = f"DB::Exception: {username}: Authentication failed: password is incorrect or there is no user with such name"
add_user_to_ldap_and_login(user=user, login=login, exitcode=exitcode, message=message, server=server, rbac=rbac)
@TestScenario
@Requirements(
RQ_SRS_007_LDAP_Authentication_Valid("1.0")
)
def valid_username_and_password(self, server, rbac=False):
"""Check that we can login using valid username and password.
"""
username = "valid_username_and_password"
user = {"cn": username, "userpassword": username}
with When(f"I add user {username} to LDAP and try to login"):
add_user_to_ldap_and_login(user=user, server=server, rbac=rbac)
@TestScenario
@Requirements(
RQ_SRS_007_LDAP_Authentication_Invalid("1.0")
)
def valid_username_and_password_invalid_server(self, server=None, rbac=False):
"""Check that we can't login using valid username and valid
password but for a different server.
"""
self.context.ldap_node = self.context.cluster.node("openldap1")
user = {"username": "user2", "userpassword": "user2", "server": "openldap1"}
exitcode = 4
message = f"DB::Exception: user2: Authentication failed: password is incorrect or there is no user with such name"
with ldap_authenticated_users(user, config_file=f"ldap_users_{getuid()}.xml", restart=True, rbac=rbac):
login_and_execute_query(username="user2", password="user2", exitcode=exitcode, message=message)
@TestScenario
@Requirements(
RQ_SRS_007_LDAP_Authentication_Valid("1.0"),
RQ_SRS_007_LDAP_Authentication_Username_Long("1.0"),
RQ_SRS_007_LDAP_Configuration_User_Name_Long("1.0")
)
def valid_long_username_and_short_password(self, server, rbac=False):
"""Check that we can login using valid very long username and short password.
"""
username = "long_username_12345678901234567890123456789012345678901234567890123456789012345678901234567890"
user = {"cn": username, "userpassword": "long_username"}
add_user_to_ldap_and_login(user=user, server=server, rbac=rbac)
@TestScenario
@Requirements(
RQ_SRS_007_LDAP_Authentication_Invalid("1.0")
)
def invalid_long_username_and_valid_short_password(self, server, rbac=False):
"""Check that we can't login using slightly invalid long username but valid password.
"""
username = "long_username_12345678901234567890123456789012345678901234567890123456789012345678901234567890"
user = {"cn": username, "userpassword": "long_username"}
login = {"username": f"{username}?"}
exitcode = 4
message=f"DB::Exception: {login['username']}: Authentication failed: password is incorrect or there is no user with such name"
add_user_to_ldap_and_login(user=user, login=login, exitcode=exitcode, message=message, server=server, rbac=rbac)
@TestScenario
@Requirements(
RQ_SRS_007_LDAP_Authentication_Valid("1.0"),
RQ_SRS_007_LDAP_Authentication_Password_Long("1.0")
)
def valid_short_username_and_long_password(self, server, rbac=False):
"""Check that we can login using valid short username with very long password.
"""
username = "long_password"
user = {"cn": username, "userpassword": "long_password_12345678901234567890123456789012345678901234567890123456789012345678901234567890"}
add_user_to_ldap_and_login(user=user, server=server, rbac=rbac)
@TestScenario
@Requirements(
RQ_SRS_007_LDAP_Authentication_Invalid("1.0")
)
def valid_short_username_and_invalid_long_password(self, server, rbac=False):
"""Check that we can't login using valid short username and invalid long password.
"""
username = "long_password"
user = {"cn": username, "userpassword": "long_password_12345678901234567890123456789012345678901234567890123456789012345678901234567890"}
login = {"password": user["userpassword"] + "1"}
exitcode = 4
message=f"DB::Exception: {username}: Authentication failed: password is incorrect or there is no user with such name"
add_user_to_ldap_and_login(user=user, login=login, exitcode=exitcode, message=message, server=server, rbac=rbac)
@TestScenario
@Requirements(
RQ_SRS_007_LDAP_Authentication_Invalid("1.0")
)
def valid_username_and_invalid_password(self, server, rbac=False):
"""Check that we can't login using valid username and invalid password.
"""
username = "valid_username_and_invalid_password"
user = {"cn": username, "userpassword": username}
login = {"password": user["userpassword"] + "1"}
exitcode = 4
message=f"DB::Exception: {username}: Authentication failed: password is incorrect or there is no user with such name"
add_user_to_ldap_and_login(user=user, login=login, exitcode=exitcode, message=message, server=server, rbac=rbac)
@TestScenario
@Requirements(
RQ_SRS_007_LDAP_Authentication_Invalid("1.0")
)
def invalid_username_and_valid_password(self, server, rbac=False):
"""Check that we can't login using slightly invalid username but valid password.
"""
username = "invalid_username_and_valid_password"
user = {"cn": username, "userpassword": username}
login = {"username": user["cn"] + "1"}
exitcode = 4
message=f"DB::Exception: {login['username']}: Authentication failed: password is incorrect or there is no user with such name"
add_user_to_ldap_and_login(user=user, login=login, exitcode=exitcode, message=message, server=server, rbac=rbac)
@TestScenario
@Requirements(
RQ_SRS_007_LDAP_Authentication_Valid("1.0"),
RQ_SRS_007_LDAP_Authentication_Username_UTF8("1.0"),
RQ_SRS_007_LDAP_Configuration_User_Name_UTF8("1.0")
)
def valid_utf8_username_and_ascii_password(self, server, rbac=False):
"""Check that we can login using valid utf-8 username with ascii password.
"""
username = "utf8_username_Gãńdåłf_Thê_Gręât"
user = {"cn": username, "userpassword": "utf8_username"}
add_user_to_ldap_and_login(user=user, server=server, rbac=rbac)
@TestScenario
@Requirements(
RQ_SRS_007_LDAP_Authentication_Valid("1.0"),
RQ_SRS_007_LDAP_Authentication_Password_UTF8("1.0")
)
def valid_ascii_username_and_utf8_password(self, server, rbac=False):
"""Check that we can login using valid ascii username with utf-8 password.
"""
username = "utf8_password"
user = {"cn": username, "userpassword": "utf8_password_Gãńdåłf_Thê_Gręât"}
add_user_to_ldap_and_login(user=user, server=server, rbac=rbac)
@TestScenario
def empty_username_and_empty_password(self, server=None, rbac=False):
"""Check that we can login using empty username and empty password as
it will use the default user and that has an empty password.
"""
login_and_execute_query(username="", password="")
@TestScenario
@Tags("verification_cooldown")
@Requirements(
RQ_SRS_007_LDAP_Configuration_Server_VerificationCooldown_Default("1.0")
)
def default_verification_cooldown_value(self, server, rbac=False):
"""Check that the default value (0) for the verification cooldown parameter
disables caching and forces contacting the LDAP server for each
authentication request.
"""
error_message = "DB::Exception: testVCD: Authentication failed: password is incorrect or there is no user with such name"
error_exitcode = 4
user = None
with Given("I have an LDAP configuration that uses the default verification_cooldown value (0)"):
servers = {"openldap1": {"host": "openldap1", "port": "389", "enable_tls": "no",
"auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com"
}}
self.context.ldap_node = self.context.cluster.node(server)
try:
with Given("I add user to LDAP"):
user = {"cn": "testVCD", "userpassword": "testVCD"}
user = add_user_to_ldap(**user)
with ldap_servers(servers):
with ldap_authenticated_users({"username": user["cn"], "server": server}, config_file=f"ldap_users_{getuid()}.xml"):
with When("I login and execute a query"):
login_and_execute_query(username=user["cn"], password=user["userpassword"])
with And("I change user password in LDAP"):
change_user_password_in_ldap(user, "newpassword")
with Then("when I try to login immediately with the old user password it should fail"):
login_and_execute_query(username=user["cn"], password=user["userpassword"],
exitcode=error_exitcode, message=error_message)
finally:
with Finally("I make sure LDAP user is deleted"):
if user is not None:
delete_user_from_ldap(user, exitcode=None)
@TestScenario
@Tags("verification_cooldown")
@Requirements(
RQ_SRS_007_LDAP_Configuration_Server_VerificationCooldown("1.0")
)
def valid_verification_cooldown_value_cn_change(self, server, rbac=False):
"""Check that we can perform requests without contacting the LDAP server
after successful authentication when the verification_cooldown parameter
is set and the user cn is changed.
"""
user = None
new_user = None
with Given("I have an LDAP configuration that sets verification_cooldown parameter to 2 sec"):
servers = { "openldap1": {
"host": "openldap1",
"port": "389",
"enable_tls": "no",
"auth_dn_prefix": "cn=",
"auth_dn_suffix": ",ou=users,dc=company,dc=com",
"verification_cooldown": "600"
}}
self.context.ldap_node = self.context.cluster.node(server)
try:
with Given("I add user to LDAP"):
user = {"cn": "testVCD", "userpassword": "testVCD"}
user = add_user_to_ldap(**user)
with ldap_servers(servers):
with ldap_authenticated_users({"username": user["cn"], "server": server}, config_file=f"ldap_users_{getuid()}.xml"):
with When("I login and execute a query"):
login_and_execute_query(username=user["cn"], password=user["userpassword"])
with And("I change user cn in LDAP"):
new_user = change_user_cn_in_ldap(user, "testVCD2")
with Then("when I try to login again with the old user cn it should work"):
login_and_execute_query(username=user["cn"], password=user["userpassword"])
finally:
with Finally("I make sure LDAP user is deleted"):
if new_user is not None:
delete_user_from_ldap(new_user, exitcode=None)
@TestScenario
@Tags("verification_cooldown")
@Requirements(
RQ_SRS_007_LDAP_Configuration_Server_VerificationCooldown("1.0")
)
def valid_verification_cooldown_value_password_change(self, server, rbac=False):
"""Check that we can perform requests without contacting the LDAP server
after successful authentication when the verification_cooldown parameter
is set and the user password is changed.
"""
user = None
with Given("I have an LDAP configuration that sets verification_cooldown parameter to 2 sec"):
servers = { "openldap1": {
"host": "openldap1",
"port": "389",
"enable_tls": "no",
"auth_dn_prefix": "cn=",
"auth_dn_suffix": ",ou=users,dc=company,dc=com",
"verification_cooldown": "600"
}}
self.context.ldap_node = self.context.cluster.node(server)
try:
with Given("I add user to LDAP"):
user = {"cn": "testVCD", "userpassword": "testVCD"}
user = add_user_to_ldap(**user)
with ldap_servers(servers):
with ldap_authenticated_users({"username": user["cn"], "server": server}, config_file=f"ldap_users_{getuid()}.xml"):
with When("I login and execute a query"):
login_and_execute_query(username=user["cn"], password=user["userpassword"])
with And("I change user password in LDAP"):
change_user_password_in_ldap(user, "newpassword")
with Then("when I try to login again with the old password it should work"):
login_and_execute_query(username=user["cn"], password=user["userpassword"])
finally:
with Finally("I make sure LDAP user is deleted"):
if user is not None:
delete_user_from_ldap(user, exitcode=None)
@TestScenario
@Tags("verification_cooldown")
@Requirements(
RQ_SRS_007_LDAP_Configuration_Server_VerificationCooldown("1.0")
)
def valid_verification_cooldown_value_ldap_unavailable(self, server, rbac=False):
"""Check that we can perform requests without contacting the LDAP server
after successful authentication when the verification_cooldown parameter
is set, even when the LDAP server is offline.
"""
user = None
with Given("I have an LDAP configuration that sets verification_cooldown parameter to 2 sec"):
servers = { "openldap1": {
"host": "openldap1",
"port": "389",
"enable_tls": "no",
"auth_dn_prefix": "cn=",
"auth_dn_suffix": ",ou=users,dc=company,dc=com",
"verification_cooldown": "600"
}}
self.context.ldap_node = self.context.cluster.node(server)
try:
with Given("I add a new user to LDAP"):
user = {"cn": "testVCD", "userpassword": "testVCD"}
user = add_user_to_ldap(**user)
with ldap_servers(servers):
with ldap_authenticated_users({"username": user["cn"], "server": server},
config_file=f"ldap_users_{getuid()}.xml"):
with When("I login and execute a query"):
login_and_execute_query(username=user["cn"], password=user["userpassword"])
try:
with And("then I stop the ldap server"):
self.context.ldap_node.stop()
with Then("when I try to login again with the server offline it should work"):
login_and_execute_query(username=user["cn"], password=user["userpassword"])
finally:
with Finally("I start the ldap server back up"):
self.context.ldap_node.start()
finally:
with Finally("I make sure LDAP user is deleted"):
if user is not None:
delete_user_from_ldap(user, exitcode=None)
@TestOutline
def repeat_requests(self, server, iterations, vcd_value, rbac=False, timeout=600):
"""Run repeated requests from some user to the LDAP server.
"""
user = None
with Given(f"I have an LDAP configuration that sets verification_cooldown parameter to {vcd_value} sec"):
servers = { "openldap1": {
"host": "openldap1",
"port": "389",
"enable_tls": "no",
"auth_dn_prefix": "cn=",
"auth_dn_suffix": ",ou=users,dc=company,dc=com",
"verification_cooldown": vcd_value
}}
self.context.ldap_node = self.context.cluster.node(server)
try:
with And("I add a new user to LDAP"):
user = {"cn": "testVCD", "userpassword": "testVCD"}
user = add_user_to_ldap(**user)
with ldap_servers(servers):
with ldap_authenticated_users({"username": user["cn"], "server": server}, config_file=f"ldap_users_{getuid()}.xml"):
with When(f"I login and execute some query {iterations} times"):
start_time = time.time()
r = self.context.node.command(f"time for i in {{1..{iterations}}}; do clickhouse client -q \"SELECT 1\" --user {user['cn']} --password {user['userpassword']} > /dev/null; done", timeout=timeout)
end_time = time.time()
return end_time - start_time
finally:
with Finally("I make sure LDAP user is deleted"):
if user is not None:
delete_user_from_ldap(user, exitcode=None)
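# Note: the measured duration covers the whole shell loop, so each of the
# `iterations` logins also pays the cost of spawning a new clickhouse
# client process; the comparison in the performance scenario below is
# still valid because both runs share that fixed per-login overhead.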
@TestScenario
@Tags("verification_cooldown")
@Requirements(
RQ_SRS_007_LDAP_Authentication_VerificationCooldown_Performance("1.0")
)
def verification_cooldown_performance(self, server, rbac=False, iterations=5000):
"""Check that login performance is better when the verification cooldown
parameter is set to a positive value when comparing to the case when
the verification cooldown parameter is turned off.
"""
vcd_time = 0
no_vcd_time = 0
with Example(f"Repeated requests with verification cooldown parameter set to 600 seconds, {iterations} iterations"):
vcd_time = repeat_requests(server=server, iterations=iterations, vcd_value="600", rbac=rbac)
metric("login_with_vcd_value_600", units="seconds", value=vcd_time)
with Example(f"Repeated requests with verification cooldown parameter set to 0 seconds, {iterations} iterations"):
no_vcd_time = repeat_requests(server=server, iterations=iterations, vcd_value="0", rbac=rbac)
metric("login_with_vcd_value_0", units="seconds", value=no_vcd_time)
with Then("Log the performance improvement as a percentage"):
metric("percentage_improvement", units="%", value=100*(no_vcd_time - vcd_time)/vcd_time)
@TestOutline
def check_verification_cooldown_reset_on_core_server_parameter_change(self, server,
parameter_name, parameter_value, rbac=False):
"""Check that the LDAP login cache is reset for all the LDAP authentication users
when verification_cooldown parameter is set after one of the core server
parameters is changed in the LDAP server configuration.
"""
config_d_dir = "/etc/clickhouse-server/config.d"
config_file = "ldap_servers.xml"
error_message = "DB::Exception: {user}: Authentication failed: password is incorrect or there is no user with such name"
error_exitcode = 4
user = None
config = None
updated_config = None
with Given("I have an LDAP configuration that sets verification_cooldown parameter to 600 sec"):
servers = { "openldap1": {
"host": "openldap1",
"port": "389",
"enable_tls": "no",
"auth_dn_prefix": "cn=",
"auth_dn_suffix": ",ou=users,dc=company,dc=com",
"verification_cooldown": "600"
}}
self.context.ldap_node = self.context.cluster.node(server)
with And("LDAP authenticated user"):
users = [
{"cn": f"testVCD_0", "userpassword": "testVCD_0"},
{"cn": f"testVCD_1", "userpassword": "testVCD_1"}
]
with And("I create LDAP servers configuration file"):
config = create_ldap_servers_config_content(servers, config_d_dir, config_file)
with ldap_users(*users) as users:
with ldap_servers(servers, restart=True):
with ldap_authenticated_users(*[{"username": user["cn"], "server": server} for user in users]):
with When("I login and execute a query"):
for user in users:
with By(f"as user {user['cn']}"):
login_and_execute_query(username=user["cn"], password=user["userpassword"])
with And("I change user password in LDAP"):
for user in users:
with By(f"for user {user['cn']}"):
change_user_password_in_ldap(user, "newpassword")
with And(f"I change the server {parameter_name} core parameter", description=f"{parameter_value}"):
servers["openldap1"][parameter_name] = parameter_value
with And("I create an updated the config file that has a different server host name"):
updated_config = create_ldap_servers_config_content(servers, config_d_dir, config_file)
with modify_config(updated_config, restart=False):
with Then("when I try to log in it should fail as cache should have been reset"):
for user in users:
with By(f"as user {user['cn']}"):
login_and_execute_query(username=user["cn"], password=user["userpassword"],
exitcode=error_exitcode, message=error_message.format(user=user["cn"]))
@TestScenario
@Tags("verification_cooldown")
@Requirements(
RQ_SRS_007_LDAP_Authentication_VerificationCooldown_Reset_ChangeInCoreServerParameters("1.0")
)
def verification_cooldown_reset_on_server_host_parameter_change(self, server, rbac=False):
"""Check that the LDAP login cache is reset for all the LDAP authentication users
when verification_cooldown parameter is set after server host name
is changed in the LDAP server configuration.
"""
check_verification_cooldown_reset_on_core_server_parameter_change(server=server,
parameter_name="host", parameter_value="openldap2", rbac=rbac)
@TestScenario
@Tags("verification_cooldown")
@Requirements(
RQ_SRS_007_LDAP_Authentication_VerificationCooldown_Reset_ChangeInCoreServerParameters("1.0")
)
def verification_cooldown_reset_on_server_port_parameter_change(self, server, rbac=False):
"""Check that the LDAP login cache is reset for all the LDAP authentication users
when verification_cooldown parameter is set after server port is changed in the
LDAP server configuration.
"""
check_verification_cooldown_reset_on_core_server_parameter_change(server=server,
parameter_name="port", parameter_value="9006", rbac=rbac)
@TestScenario
@Tags("verification_cooldown")
@Requirements(
RQ_SRS_007_LDAP_Authentication_VerificationCooldown_Reset_ChangeInCoreServerParameters("1.0")
)
def verification_cooldown_reset_on_server_auth_dn_prefix_parameter_change(self, server, rbac=False):
"""Check that the LDAP login cache is reset for all the LDAP authentication users
when verification_cooldown parameter is set after server auth_dn_prefix
is changed in the LDAP server configuration.
"""
check_verification_cooldown_reset_on_core_server_parameter_change(server=server,
parameter_name="auth_dn_prefix", parameter_value="cxx=", rbac=rbac)
@TestScenario
@Tags("verification_cooldown")
@Requirements(
RQ_SRS_007_LDAP_Authentication_VerificationCooldown_Reset_ChangeInCoreServerParameters("1.0")
)
def verification_cooldown_reset_on_server_auth_dn_suffix_parameter_change(self, server, rbac=False):
"""Check that the LDAP login cache is reset for all the LDAP authentication users
when verification_cooldown parameter is set after server auth_dn_suffix
is changed in the LDAP server configuration.
"""
check_verification_cooldown_reset_on_core_server_parameter_change(server=server,
parameter_name="auth_dn_suffix",
parameter_value=",ou=company,dc=users,dc=com", rbac=rbac)
@TestScenario
@Name("verification cooldown reset when invalid password is provided")
@Tags("verification_cooldown")
@Requirements(
RQ_SRS_007_LDAP_Authentication_VerificationCooldown_Reset_InvalidPassword("1.0")
)
def scenario(self, server, rbac=False):
"""Check that cached bind requests for the user are discarded when
the user provides invalid login credentials.
"""
user = None
error_exitcode = 4
error_message = "DB::Exception: testVCD: Authentication failed: password is incorrect or there is no user with such name"
with Given("I have an LDAP configuration that sets verification_cooldown parameter to 600 sec"):
servers = { "openldap1": {
"host": "openldap1",
"port": "389",
"enable_tls": "no",
"auth_dn_prefix": "cn=",
"auth_dn_suffix": ",ou=users,dc=company,dc=com",
"verification_cooldown": "600"
}}
self.context.ldap_node = self.context.cluster.node(server)
try:
with Given("I add a new user to LDAP"):
user = {"cn": "testVCD", "userpassword": "testVCD"}
user = add_user_to_ldap(**user)
with ldap_servers(servers):
with ldap_authenticated_users({"username": user["cn"], "server": server},
config_file=f"ldap_users_{getuid()}.xml"):
with When("I login and execute a query"):
login_and_execute_query(username=user["cn"], password=user["userpassword"])
with And("I change user password in LDAP"):
change_user_password_in_ldap(user, "newpassword")
with Then("When I try to log in with the cached password it should work"):
login_and_execute_query(username=user["cn"], password=user["userpassword"])
with And("When I try to log in with an incorrect password it should fail"):
login_and_execute_query(username=user["cn"], password="incorrect", exitcode=error_exitcode,
message=error_message)
with And("When I try to log in with the cached password it should fail"):
login_and_execute_query(username=user["cn"], password="incorrect", exitcode=error_exitcode,
message=error_message)
finally:
with Finally("I make sure LDAP user is deleted"):
if user is not None:
delete_user_from_ldap(user, exitcode=None)
@TestFeature
def verification_cooldown(self, rbac, servers=None, node="clickhouse1"):
"""Check verification cooldown parameter functionality.
"""
for scenario in loads(current_module(), Scenario, filter=has.tag("verification_cooldown")):
scenario(server="openldap1", rbac=rbac)
@TestOutline(Feature)
@Name("user authentications")
@Requirements(
RQ_SRS_007_LDAP_Authentication_Mechanism_NamePassword("1.0")
)
@Examples("rbac", [
(False,),
(True, Requirements(RQ_SRS_007_LDAP_Configuration_User_RBAC("1.0")))
])
def feature(self, rbac, servers=None, node="clickhouse1"):
"""Check that users can be authenticated using an LDAP server when
users are configured either using an XML configuration file or RBAC.
"""
self.context.node = self.context.cluster.node(node)
if servers is None:
servers = globals()["servers"]
with ldap_servers(servers):
for scenario in loads(current_module(), Scenario, filter=~has.tag("verification_cooldown")):
scenario(server="openldap1", rbac=rbac)
Feature(test=verification_cooldown)(rbac=rbac, servers=servers, node=node)
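# The outline above runs every non-cooldown scenario inline for each rbac
# example, then invokes the verification_cooldown feature so that the
# scenarios tagged "verification_cooldown" run under the same rbac setting.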
| 42.723404
| 214
| 0.659114
| 4,991
| 40,160
| 5.090964
| 0.06131
| 0.020308
| 0.013853
| 0.02078
| 0.821008
| 0.793656
| 0.775867
| 0.752056
| 0.738557
| 0.722539
| 0
| 0.023735
| 0.233118
| 40,160
| 939
| 215
| 42.768903
| 0.801286
| 0.095966
| 0
| 0.630657
| 0
| 0.018978
| 0.256396
| 0.048637
| 0
| 0
| 0
| 0
| 0.00438
| 1
| 0.055474
| false
| 0.19708
| 0.010219
| 0
| 0.067153
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
13f1e2cb6b30db5ba80bca02c3a566229267fdf2
| 89
|
py
|
Python
|
mct_transform_2d/src/mct_transform_2d/__init__.py
|
iorodeo/mct
|
fa8b85f36533c9b1486ca4f6b0c40c3daa6f4e11
|
[
"Apache-2.0"
] | null | null | null |
mct_transform_2d/src/mct_transform_2d/__init__.py
|
iorodeo/mct
|
fa8b85f36533c9b1486ca4f6b0c40c3daa6f4e11
|
[
"Apache-2.0"
] | null | null | null |
mct_transform_2d/src/mct_transform_2d/__init__.py
|
iorodeo/mct
|
fa8b85f36533c9b1486ca4f6b0c40c3daa6f4e11
|
[
"Apache-2.0"
] | null | null | null |
import transform_2d_calibrator_master
import transform_2d_calibrator
import transform_2d
| 22.25
| 37
| 0.932584
| 12
| 89
| 6.416667
| 0.416667
| 0.584416
| 0.662338
| 0.701299
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036145
| 0.067416
| 89
| 3
| 38
| 29.666667
| 0.891566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
b91c28674c5c491f996404954630a07529d6237d
| 13,580
|
py
|
Python
|
tempest/util/query_data.py
|
KiranPawar72/tempest
|
1fef3dd92b083055793065dd0693454735ec2c01
|
[
"Apache-2.0"
] | null | null | null |
tempest/util/query_data.py
|
KiranPawar72/tempest
|
1fef3dd92b083055793065dd0693454735ec2c01
|
[
"Apache-2.0"
] | null | null | null |
tempest/util/query_data.py
|
KiranPawar72/tempest
|
1fef3dd92b083055793065dd0693454735ec2c01
|
[
"Apache-2.0"
] | 1
|
2020-07-21T02:18:23.000Z
|
2020-07-21T02:18:23.000Z
|
import db_handler
from oslo_log import log as logging
from tempest import config
LOG = logging.getLogger(__name__)
CONF = config.CONF
def get_workload_count(workload_name):
try:
conn = db_handler.dbHandler()
cursor = conn.cursor()
get_workload_count = ("select count(*) from workloads where display_name='"+workload_name+"' and status=\"available\"")
cursor.execute(get_workload_count)
rows = cursor.fetchall()
for row in rows:
return row[0]
except Exception as e:
print (str(e))
finally:
cursor.close()
conn.close()
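# Note: every helper in this module shares two hazards. First, if
# db_handler.dbHandler() itself raises, `cursor` is never bound and the
# finally block fails with a NameError. Second, values are interpolated
# directly into the SQL text. A safer sketch of the query above (an
# assumption: the underlying driver supports the %s paramstyle) would be:
#
#   conn = cursor = None
#   try:
#       conn = db_handler.dbHandler()
#       cursor = conn.cursor()
#       cursor.execute(
#           "select count(*) from workloads"
#           " where display_name=%s and status='available'",
#           (workload_name,))
#       row = cursor.fetchone()
#       return row[0] if row else None
#   finally:
#       if cursor is not None:
#           cursor.close()
#       if conn is not None:
#           conn.close()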
def get_workload_id(workload_name):
try:
conn = db_handler.dbHandler()
cursor = conn.cursor()
get_workload_id = ("select id from workloads where display_name='"+workload_name+"' and status=\"available\" order by created_at desc limit 1")
cursor.execute(get_workload_id)
rows = cursor.fetchall()
for row in rows:
return row[0]
except Exception as e:
print (str(e))
finally:
cursor.close()
conn.close()
def get_deleted_workload(workload_id):
try:
conn = db_handler.dbHandler()
cursor = conn.cursor()
get_deleted_workload = ("select status from workloads where id='"+str(workload_id)+"' order by updated_at desc limit 1")
cursor.execute(get_deleted_workload)
rows = cursor.fetchall()
for row in rows:
return row[0]
except Exception as e:
print (str(e))
finally:
cursor.close()
conn.close()
def get_available_snapshots_for_workload(snapshot_name, workload_id):
try:
conn = db_handler.dbHandler()
cursor = conn.cursor()
get_available_snapshots = ("select count(*) from snapshots where display_name='"+snapshot_name+"' and workload_id='" + str(workload_id) + "' and deleted=0 order by created_at desc limit 1")
cursor.execute(get_available_snapshots)
rows = cursor.fetchall()
for row in rows:
return row[0]
except Exception as e:
print (str(e))
finally:
cursor.close()
conn.close()
def get_available_snapshots():
try:
conn = db_handler.dbHandler()
cursor = conn.cursor()
get_available_snapshots = ("select count(*) from snapshots where deleted=0 and status = 'available' order by created_at desc limit 1")
cursor.execute(get_available_snapshots)
rows = cursor.fetchall()
for row in rows:
return row[0]
except Exception as e:
print (str(e))
finally:
cursor.close()
conn.close()
def get_available_workloads():
try:
conn = db_handler.dbHandler()
cursor = conn.cursor()
get_available_workloads = ("select count(*) from workloads where status=\"available\"")
cursor.execute(get_available_workloads)
rows = cursor.fetchall()
for row in rows:
return row[0]
except Exception as e:
print (str(e))
finally:
cursor.close()
conn.close()
def get_available_restores():
try:
conn = db_handler.dbHandler()
cursor = conn.cursor()
get_available_restores = ("select count(*) from restores where status=\"available\"")
cursor.execute(get_available_restores)
rows = cursor.fetchall()
for row in rows:
return row[0]
except Exception as e:
print (str(e))
finally:
cursor.close()
conn.close()
def get_workload_snapshot_status(snapshot_name,snapshot_type, snapshot_id):
try:
conn = db_handler.dbHandler()
cursor = conn.cursor()
get_workload_snapshot = ("select status from snapshots where display_name='"+snapshot_name+"' and snapshot_type='"+snapshot_type+"' and id='" + str(snapshot_id) + "' order by created_at desc limit 1")
cursor.execute(get_workload_snapshot)
rows = cursor.fetchall()
for row in rows:
return row[0]
except Exception as e:
print (str(e))
finally:
cursor.close()
conn.close()
def get_workload_snapshot_delete_status(snapshot_name,snapshot_type, snapshot_id):
try:
conn = db_handler.dbHandler()
cursor = conn.cursor()
get_workload_snapshot_delete_status = ("select deleted from snapshots where display_name='"+snapshot_name+"' and snapshot_type='"+snapshot_type+"' and id='" + str(snapshot_id) + "' order by deleted_at desc limit 1")
cursor.execute(get_workload_snapshot_delete_status)
rows = cursor.fetchall()
for row in rows:
return row[0]
except Exception as e:
print (str(e))
finally:
cursor.close()
conn.close()
def get_workload_vmid(workload_id):
try:
conn = db_handler.dbHandler()
cursor = conn.cursor()
get_workload_vmid = ("select vm_id from workload_vms where workload_id='"+workload_id+"'")
cursor.execute(get_workload_vmid)
rows = cursor.fetchall()
for row in rows:
return row[0]
except Exception as e:
print (str(e))
finally:
cursor.close()
conn.close()
def get_workload_snapshot_id(workload_id):
try:
conn = db_handler.dbHandler()
cursor = conn.cursor()
get_workload_vmid = ("select id from snapshots where workload_id='"+workload_id+"' and status=\"available\" order by updated_at desc limit 1")
cursor.execute(get_workload_vmid)
rows = cursor.fetchall()
for row in rows:
return row[0]
except Exception as e:
print (str(e))
finally:
cursor.close()
conn.close()
def get_inprogress_snapshot_id(workload_id):
try:
conn = db_handler.dbHandler()
cursor = conn.cursor()
get_workload_vmid = ("select id from snapshots where workload_id='"+workload_id+"' and status<>'available' order by updated_at desc limit 1")
cursor.execute(get_workload_vmid)
rows = cursor.fetchall()
for row in rows:
return row[0]
except Exception as e:
print (str(e))
finally:
cursor.close()
conn.close()
def get_snapshot_restore_status(restore_name,snapshot_id):
try:
conn = db_handler.dbHandler()
cursor = conn.cursor()
get_snapshot_restore_status = ("select status from restores where display_name='"+restore_name+"' and snapshot_id='"+snapshot_id+"' order by created_at desc limit 1")
cursor.execute(get_snapshot_restore_status)
rows = cursor.fetchall()
for row in rows:
return row[0]
except Exception as e:
print (str(e))
finally:
cursor.close()
conn.close()
def get_workload_display_name(workload_id):
try:
conn = db_handler.dbHandler()
cursor = conn.cursor()
get_workload_display_name = ("select display_name from workloads where id='"+workload_id+"'")
cursor.execute(get_workload_display_name)
rows = cursor.fetchall()
for row in rows:
return row[0]
except Exception as e:
print (str(e))
finally:
cursor.close()
conn.close()
def get_workload_display_description(workload_id):
try:
conn = db_handler.dbHandler()
cursor = conn.cursor()
get_workload_display_description = ("select display_description from workloads where id='"+workload_id+"'")
cursor.execute(get_workload_display_description)
rows = cursor.fetchall()
for row in rows:
return row[0]
except Exception as e:
print (str(e))
finally:
cursor.close()
conn.close()
def get_vmids():
try:
vmlist = []
conn = db_handler.dbHandler()
cursor = conn.cursor()
get_vmids = ("select vm_id from workload_vms where status=\"available\"")
cursor.execute(get_vmids)
rows = cursor.fetchall()
for row in rows:
vmlist.append(str(row[0]))
return vmlist
except Exception as e:
print (str(e))
finally:
cursor.close()
conn.close()
def get_workload_status(workload_name):
try:
conn = db_handler.dbHandler()
cursor = conn.cursor()
get_workload_snapshot = ("select status from workloads where display_name='"+workload_name+"' order by created_at desc limit 1")
cursor.execute(get_workload_snapshot)
rows = cursor.fetchall()
for row in rows:
return row[0]
except Exception as e:
print (str(e))
finally:
cursor.close()
conn.close()
def get_snapshot_restore_delete_status(restore_name,restore_type):
try:
conn = db_handler.dbHandler()
cursor = conn.cursor()
get_snapshot_restore_delete_status = ("select deleted from restores where display_name='"+restore_name+"' and restore_type='"+restore_type+"' order by deleted_at desc limit 1")
cursor.execute(get_snapshot_restore_delete_status)
rows = cursor.fetchall()
for row in rows:
return row[0]
except Exception as e:
print (str(e))
finally:
cursor.close()
conn.close()
def get_snapshot_restore_id(snapshot_id):
try:
conn = db_handler.dbHandler()
cursor = conn.cursor()
get_snapshot_restore_id = ("select id from restores where snapshot_id='"+snapshot_id+"' order by deleted_at asc limit 1")
cursor.execute(get_snapshot_restore_id)
rows = cursor.fetchall()
for row in rows:
return row[0]
except Exception as e:
print (str(e))
finally:
cursor.close()
conn.close()
def get_available_vms_of_workload(workload_id):
try:
conn = db_handler.dbHandler()
cursor = conn.cursor()
get_count = ("select count(*) from workload_vms where workload_id='" + str(workload_id) + "' and status <> 'deleted';")
cursor.execute(get_count)
rows = cursor.fetchall()
for row in rows:
return row[0]
except Exception as e:
print (str(e))
finally:
cursor.close()
conn.close()
def get_workload_status_by_id(workload_id):
try:
conn = db_handler.dbHandler()
cursor = conn.cursor()
get_workload_snapshot = ("select status from workloads where id='"+workload_id+"' order by created_at desc limit 1")
cursor.execute(get_workload_snapshot)
rows = cursor.fetchall()
for row in rows:
return row[0]
except Exception as e:
LOG.error(str(e))
finally:
cursor.close()
conn.close()
def get_workload_schedule(workload_id):
try:
conn = db_handler.dbHandler()
cursor = conn.cursor()
get_workload_schedule = ("select jobschedule from workloads where id='"+workload_id+"' order by created_at desc limit 1")
cursor.execute(get_workload_schedule)
rows = cursor.fetchall()
for row in rows:
return row[0]
except Exception as e:
LOG.error(str(e))
finally:
cursor.close()
conn.close()
def get_available_workload_types():
try:
conn = db_handler.dbHandler()
cursor = conn.cursor()
get_workload_types = ("select count(*) from workload_types where deleted <> 1")
cursor.execute(get_workload_types)
rows = cursor.fetchall()
for row in rows:
return row[0]
except Exception as e:
LOG.error(str(e))
finally:
cursor.close()
conn.close()
def get_workload_type_data(workload_type_id):
try:
conn = db_handler.dbHandler()
cursor = conn.cursor()
get_workload_types = ("select * from workload_types where deleted <> 1 and ID='" + str(workload_type_id) + "'")
cursor.execute(get_workload_types)
rows = cursor.fetchall()
for row in rows:
return row
except Exception as e:
LOG.error(str(e))
finally:
cursor.close()
conn.close()
def get_workload_vmids(workload_id):
try:
vm_ids = []
conn = db_handler.dbHandler()
cursor = conn.cursor()
get_workload_vmid = ("select vm_id from workload_vms where workload_id='"+workload_id+"'")
cursor.execute(get_workload_vmid)
rows = cursor.fetchall()
for row in rows:
vm_ids.append(row[0])
return vm_ids
except Exception as e:
print (str(e))
finally:
cursor.close()
conn.close()
def get_config_backup_id():
try:
conn = db_handler.dbHandler()
cursor = conn.cursor()
get_config_backup_id = ("select id from config_backups order by created_at desc limit 1")
cursor.execute(get_config_backup_id)
rows = cursor.fetchall()
for row in rows:
return row[0]
except Exception as e:
print (str(e))
finally:
cursor.close()
conn.close()
def get_config_workload_id():
try:
conn = db_handler.dbHandler()
cursor = conn.cursor()
get_config_backup_id = ("select id from config_workloads;")
cursor.execute(get_config_backup_id)
rows = cursor.fetchall()
for row in rows:
return row[0]
except Exception as e:
print (str(e))
finally:
cursor.close()
conn.close()
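# The helpers above build SQL by string concatenation, which is injection-prone
# and repeats the same try/except/finally boilerplate in every function. Below
# is a minimal sketch of the same lookup pattern with a parameterized query; it
# assumes db_handler.dbHandler() returns a DB-API 2.0 connection whose cursor
# accepts %s placeholders (MySQLdb/pymysql style). The None guards also avoid
# the NameError the finally blocks above would raise if dbHandler() failed
# before cursor was assigned.
def fetch_scalar(query, params):
    conn = None
    cursor = None
    try:
        conn = db_handler.dbHandler()
        cursor = conn.cursor()
        cursor.execute(query, params)
        row = cursor.fetchone()
        return row[0] if row else None
    except Exception as e:
        print(str(e))
    finally:
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()
# Example, equivalent to get_workload_status(workload_name) above:
# status = fetch_scalar("select status from workloads where display_name=%s "
#                       "order by created_at desc limit 1", (workload_name,))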
| 32.104019
| 223
| 0.616863
| 1,659
| 13,580
| 4.846293
| 0.046414
| 0.058831
| 0.043657
| 0.073881
| 0.901617
| 0.886567
| 0.856592
| 0.830473
| 0.811692
| 0.80597
| 0
| 0.004624
| 0.283432
| 13,580
| 422
| 224
| 32.180095
| 0.821601
| 0
| 0
| 0.79845
| 0
| 0.002584
| 0.147791
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069767
| false
| 0
| 0.007752
| 0
| 0.147287
| 0.059432
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b91eced698d4c640d25fbba8d45a509ab6cdd4ee
| 188
|
py
|
Python
|
onnx/backend/test/report/base.py
|
HeliWang/onnx-fixed
|
266ec0a6a302d99710921f9bc8047d5fe3558328
|
[
"MIT"
] | 4,071
|
2018-12-13T04:17:38.000Z
|
2022-03-30T03:29:35.000Z
|
blaze/thirdparty/onnx/onnx-1.2.2/onnx/backend/test/report/base.py
|
laozhuang727/x-deeplearning
|
781545783a4e2bbbda48fc64318fb2c6d8bbb3cc
|
[
"Apache-2.0"
] | 359
|
2018-12-21T01:14:57.000Z
|
2022-02-15T07:18:02.000Z
|
blaze/thirdparty/onnx/onnx-1.2.2/onnx/backend/test/report/base.py
|
laozhuang727/x-deeplearning
|
781545783a4e2bbbda48fc64318fb2c6d8bbb3cc
|
[
"Apache-2.0"
] | 1,054
|
2018-12-20T09:57:42.000Z
|
2022-03-29T07:16:53.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
class ReporterBase(object):
pass
| 20.888889
| 39
| 0.851064
| 23
| 188
| 6.130435
| 0.565217
| 0.283688
| 0.453901
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132979
| 188
| 8
| 40
| 23.5
| 0.865031
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.166667
| 0.666667
| 0
| 0.833333
| 0.166667
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 8
|
b96ec78b8ff701966e2224e69031e93a2e55b609
| 29,094
|
py
|
Python
|
tests/test_create_schema.py
|
kellyjonbrazil/jwlk
|
99f67716552d7856355ddc286ea1859482141d3b
|
[
"MIT"
] | null | null | null |
tests/test_create_schema.py
|
kellyjonbrazil/jwlk
|
99f67716552d7856355ddc286ea1859482141d3b
|
[
"MIT"
] | null | null | null |
tests/test_create_schema.py
|
kellyjonbrazil/jwlk
|
99f67716552d7856355ddc286ea1859482141d3b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import unittest
import os
from jello.lib import opts, Schema
class MyTests(unittest.TestCase):
def setUp(self):
# initialize options
opts.initialize = None
opts.version_info = None
opts.helpme = None
opts.compact = None
opts.nulls = None
opts.raw = None
opts.lines = None
opts.mono = None
opts.schema = None
opts.types = None
opts.keyname_color = None
opts.keyword_color = None
opts.number_color = None
opts.string_color = None
# initialize schema_lists
self.schema = Schema()
# initialize JELLO_COLORS env variable
os.environ['JELLO_COLORS'] = 'default,default,default,default'
# set the colors
self.schema.set_colors()
# create samples
self.dict_sample = {
'string': 'string\nwith newline\ncharacters in it',
'true': True,
'false': False,
'null': None,
'int': 42,
'float': 3.14,
'array': [
'string\nwith newline\ncharacters in it',
True,
False,
None,
42,
3.14
]
}
self.list_sample = [
'string\nwith newline\ncharacters in it',
True,
False,
None,
42,
3.14
]
self.list_of_dicts_sample = [
{
'string': 'string\nwith newline\ncharacters in it',
'true': True,
'false': False,
'null': None,
'int': 42,
'float': 3.14,
'array': [
'string\nwith newline\ncharacters in it',
True,
False,
None,
42,
3.14
]
},
{
'string': 'another string\nwith newline\ncharacters in it',
'true': True,
'false': False,
'null': None,
'int': 10001,
'float': -400.45,
'array': [
'string\nwith newline\ncharacters in it',
True,
False,
None,
-6000034,
999999.854321
]
}
]
self.list_of_lists_sample = [
[
'string\nwith newline\ncharacters in it',
True,
False,
None,
42,
3.14
],
[
'another string\nwith newline\ncharacters in it',
True,
False,
None,
42001,
-3.14
]
]
self.dict_space_keys_nest_sample = {
'string with spaces': 'string\nwith newline\ncharacters in it',
'foo': {
"another with spaces": {
"nested": {
"nested space": True
}
}
},
'true': True,
'false': False,
'null': None,
'int': 42,
'float': 3.14,
'array': [
'string\nwith newline\ncharacters in it',
True,
False,
None,
42,
3.14
]
}
self.deep_nest_sample = [[[[{"foo":[[[[1,2,3]]]]}]]]]
# ------------ Tests ------------
#
# Naked True
#
def test_true(self):
"""
Test True
"""
data_in = True
expected = '\x1b[34;01m_\x1b[39;00m = \x1b[90mtrue\x1b[39m;'
output = self.schema.create_schema(data_in)
self.assertEqual(self.schema.color_output(output), expected)
def test_true_m(self):
"""
Test True -m
"""
data_in = True
expected = '_ = true;'
self.assertEqual(self.schema.create_schema(data_in), expected)
#
# Naked False
#
def test_false(self):
"""
Test False
"""
data_in = False
expected = '\x1b[34;01m_\x1b[39;00m = \x1b[90mfalse\x1b[39m;'
output = self.schema.create_schema(data_in)
self.assertEqual(self.schema.color_output(output), expected)
def test_false_m(self):
"""
Test False -m
"""
data_in = False
expected = '_ = false;'
self.assertEqual(self.schema.create_schema(data_in), expected)
#
# Naked null
#
def test_null(self):
"""
Test None
"""
data_in = None
expected = '\x1b[34;01m_\x1b[39;00m = \x1b[90mnull\x1b[39m;'
output = self.schema.create_schema(data_in)
self.assertEqual(self.schema.color_output(output), expected)
def test_null_m(self):
"""
Test None -m
"""
data_in = None
expected = '_ = null;'
self.assertEqual(self.schema.create_schema(data_in), expected)
#
# naked int
#
def test_int(self):
"""
Test int
"""
data_in = 42
expected = '\x1b[34;01m_\x1b[39;00m = \x1b[35m42\x1b[39m;'
output = self.schema.create_schema(data_in)
self.assertEqual(self.schema.color_output(output), expected)
def test_int_m(self):
"""
Test int -m
"""
data_in = 42
expected = '_ = 42;'
self.assertEqual(self.schema.create_schema(data_in), expected)
#
# naked float
#
def test_float(self):
"""
Test float
"""
data_in = 3.14
expected = '\x1b[34;01m_\x1b[39;00m = \x1b[35m3.14\x1b[39m;'
output = self.schema.create_schema(data_in)
self.assertEqual(self.schema.color_output(output), expected)
def test_float_m(self):
"""
Test float -m
"""
data_in = 3.14
expected = '_ = 3.14;'
self.assertEqual(self.schema.create_schema(data_in), expected)
#
# naked string
#
def test_string(self):
"""
Test string
"""
data_in = '"string with\\nnewline char"'
expected = '\x1b[34;01m_\x1b[39;00m = \x1b[32m"\\"string with\\\\nnewline char\\""\x1b[39m;'
output = self.schema.create_schema(data_in)
self.assertEqual(self.schema.color_output(output), expected)
def test_string_m(self):
"""
Test string -m
"""
data_in = '"string with\\nnewline char"'
expected = '_ = "\\"string with\\\\nnewline char\\"";'
self.assertEqual(self.schema.create_schema(data_in), expected)
#
# Naked Dict
#
def test_dict(self):
"""
Test self.dict_sample
"""
data_in = self.dict_sample
expected = '\x1b[34;01m_\x1b[39;00m = {};\n\x1b[34;01m_\x1b[39;00m.\x1b[34;01mstring\x1b[39;00m = \x1b[32m"string\\nwith newline\\ncharacters in it"\x1b[39m;\n\x1b[34;01m_\x1b[39;00m.\x1b[90mtrue\x1b[39m = \x1b[90mtrue\x1b[39m;\n\x1b[34;01m_\x1b[39;00m.\x1b[90mfalse\x1b[39m = \x1b[90mfalse\x1b[39m;\n\x1b[34;01m_\x1b[39;00m.\x1b[90mnull\x1b[39m = \x1b[90mnull\x1b[39m;\n\x1b[34;01m_\x1b[39;00m.\x1b[90mint\x1b[39m = \x1b[35m42\x1b[39m;\n\x1b[34;01m_\x1b[39;00m.\x1b[90mfloat\x1b[39m = \x1b[35m3.14\x1b[39m;\n\x1b[34;01m_\x1b[39;00m.\x1b[34;01marray\x1b[39;00m = [];\n\x1b[34;01m_\x1b[39;00m.\x1b[34;01marray\x1b[39;00m[\x1b[35m0\x1b[39m] = \x1b[32m"string\\nwith newline\\ncharacters in it"\x1b[39m;\n\x1b[34;01m_\x1b[39;00m.\x1b[34;01marray\x1b[39;00m[\x1b[35m1\x1b[39m] = \x1b[90mtrue\x1b[39m;\n\x1b[34;01m_\x1b[39;00m.\x1b[34;01marray\x1b[39;00m[\x1b[35m2\x1b[39m] = \x1b[90mfalse\x1b[39m;\n\x1b[34;01m_\x1b[39;00m.\x1b[34;01marray\x1b[39;00m[\x1b[35m3\x1b[39m] = \x1b[90mnull\x1b[39m;\n\x1b[34;01m_\x1b[39;00m.\x1b[34;01marray\x1b[39;00m[\x1b[35m4\x1b[39m] = \x1b[35m42\x1b[39m;\n\x1b[34;01m_\x1b[39;00m.\x1b[34;01marray\x1b[39;00m[\x1b[35m5\x1b[39m] = \x1b[35m3.14\x1b[39m;'
output = self.schema.create_schema(data_in)
self.assertEqual(self.schema.color_output(output), expected)
def test_dict_t(self):
"""
Test self.dict_sample -t
"""
opts.types = True
data_in = self.dict_sample
expected = '\x1b[34;01m_\x1b[39;00m = {}; // (object)\n\x1b[34;01m_\x1b[39;00m.\x1b[34;01mstring\x1b[39;00m = \x1b[32m"string\\nwith newline\\ncharacters in it"\x1b[39m; // (string)\n\x1b[34;01m_\x1b[39;00m.\x1b[90mtrue\x1b[39m = \x1b[90mtrue\x1b[39m; // (boolean)\n\x1b[34;01m_\x1b[39;00m.\x1b[90mfalse\x1b[39m = \x1b[90mfalse\x1b[39m; // (boolean)\n\x1b[34;01m_\x1b[39;00m.\x1b[90mnull\x1b[39m = \x1b[90mnull\x1b[39m; // (null)\n\x1b[34;01m_\x1b[39;00m.\x1b[90mint\x1b[39m = \x1b[35m42\x1b[39m; // (number)\n\x1b[34;01m_\x1b[39;00m.\x1b[90mfloat\x1b[39m = \x1b[35m3.14\x1b[39m; // (number)\n\x1b[34;01m_\x1b[39;00m.\x1b[34;01marray\x1b[39;00m = []; // (array)\n\x1b[34;01m_\x1b[39;00m.\x1b[34;01marray\x1b[39;00m[\x1b[35m0\x1b[39m] = \x1b[32m"string\\nwith newline\\ncharacters in it"\x1b[39m; // (string)\n\x1b[34;01m_\x1b[39;00m.\x1b[34;01marray\x1b[39;00m[\x1b[35m1\x1b[39m] = \x1b[90mtrue\x1b[39m; // (boolean)\n\x1b[34;01m_\x1b[39;00m.\x1b[34;01marray\x1b[39;00m[\x1b[35m2\x1b[39m] = \x1b[90mfalse\x1b[39m; // (boolean)\n\x1b[34;01m_\x1b[39;00m.\x1b[34;01marray\x1b[39;00m[\x1b[35m3\x1b[39m] = \x1b[90mnull\x1b[39m; // (null)\n\x1b[34;01m_\x1b[39;00m.\x1b[34;01marray\x1b[39;00m[\x1b[35m4\x1b[39m] = \x1b[35m42\x1b[39m; // (number)\n\x1b[34;01m_\x1b[39;00m.\x1b[34;01marray\x1b[39;00m[\x1b[35m5\x1b[39m] = \x1b[35m3.14\x1b[39m; // (number)'
output = self.schema.create_schema(data_in)
self.assertEqual(self.schema.color_output(output), expected)
def test_dict_m(self):
"""
Test self.dict_sample -m
"""
data_in = self.dict_sample
expected = '_ = {};\n_.string = "string\\nwith newline\\ncharacters in it";\n_.true = true;\n_.false = false;\n_.null = null;\n_.int = 42;\n_.float = 3.14;\n_.array = [];\n_.array[0] = "string\\nwith newline\\ncharacters in it";\n_.array[1] = true;\n_.array[2] = false;\n_.array[3] = null;\n_.array[4] = 42;\n_.array[5] = 3.14;'
self.assertEqual(self.schema.create_schema(data_in), expected)
def test_dict_mt(self):
"""
Test self.dict_sample -mt
"""
opts.types = True
data_in = self.dict_sample
expected = '_ = {}; // (object)\n_.string = "string\\nwith newline\\ncharacters in it"; // (string)\n_.true = true; // (boolean)\n_.false = false; // (boolean)\n_.null = null; // (null)\n_.int = 42; // (number)\n_.float = 3.14; // (number)\n_.array = []; // (array)\n_.array[0] = "string\\nwith newline\\ncharacters in it"; // (string)\n_.array[1] = true; // (boolean)\n_.array[2] = false; // (boolean)\n_.array[3] = null; // (null)\n_.array[4] = 42; // (number)\n_.array[5] = 3.14; // (number)'
self.assertEqual(self.schema.create_schema(data_in), expected)
#
# true in a list
#
def test_list_true(self):
"""
Test [True]
"""
data_in = [True]
expected = '\x1b[34;01m_\x1b[39;00m = [];\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m] = \x1b[90mtrue\x1b[39m;'
output = self.schema.create_schema(data_in)
self.assertEqual(self.schema.color_output(output), expected)
def test_list_true_m(self):
"""
Test [True] -m
"""
data_in = [True]
expected = '_ = [];\n_[0] = true;'
self.assertEqual(self.schema.create_schema(data_in), expected)
#
# false in a list
#
def test_list_false(self):
"""
Test [False]
"""
data_in = [False]
expected = '\x1b[34;01m_\x1b[39;00m = [];\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m] = \x1b[90mfalse\x1b[39m;'
output = self.schema.create_schema(data_in)
self.assertEqual(self.schema.color_output(output), expected)
def test_list_false_m(self):
"""
Test [False] -m
"""
data_in = [False]
expected = '_ = [];\n_[0] = false;'
self.assertEqual(self.schema.create_schema(data_in), expected)
#
# null in a list
#
def test_list_null(self):
"""
Test [None]
"""
data_in = [None]
expected = '\x1b[34;01m_\x1b[39;00m = [];\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m] = \x1b[90mnull\x1b[39m;'
output = self.schema.create_schema(data_in)
self.assertEqual(self.schema.color_output(output), expected)
def test_list_null_m(self):
"""
Test [None] -m
"""
data_in = [None]
expected = '_ = [];\n_[0] = null;'
self.assertEqual(self.schema.create_schema(data_in), expected)
#
# Int in a list
#
def test_list_int(self):
"""
Test [42]
"""
data_in = [42]
expected = '\x1b[34;01m_\x1b[39;00m = [];\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m] = \x1b[35m42\x1b[39m;'
output = self.schema.create_schema(data_in)
self.assertEqual(self.schema.color_output(output), expected)
def test_list_int_m(self):
"""
Test [42] -m
"""
data_in = [42]
expected = '_ = [];\n_[0] = 42;'
self.assertEqual(self.schema.create_schema(data_in), expected)
#
# Float in a list
#
def test_list_float(self):
"""
Test [3.14]
"""
data_in = [3.14]
expected = '\x1b[34;01m_\x1b[39;00m = [];\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m] = \x1b[35m3.14\x1b[39m;'
output = self.schema.create_schema(data_in)
self.assertEqual(self.schema.color_output(output), expected)
def test_list_float_m(self):
"""
Test [3.14] -m
"""
data_in = [3.14]
expected = '_ = [];\n_[0] = 3.14;'
self.assertEqual(self.schema.create_schema(data_in), expected)
#
# String in a list
#
def test_list_str(self):
"""
Test ['string with spaces\nand newline\ncharacters']
"""
data_in = ['string with spaces\nand newline\ncharacters']
expected = '\x1b[34;01m_\x1b[39;00m = [];\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m] = \x1b[32m"string with spaces\\nand newline\\ncharacters"\x1b[39m;'
output = self.schema.create_schema(data_in)
self.assertEqual(self.schema.color_output(output), expected)
def test_list_str_m(self):
"""
Test ['string with spaces\nand newline\ncharacters'] -m
"""
data_in = ['string with spaces\nand newline\ncharacters']
expected = '_ = [];\n_[0] = "string with spaces\\nand newline\\ncharacters";'
self.assertEqual(self.schema.create_schema(data_in), expected)
#
# List with different types of elements
#
def test_list_sample(self):
"""
Test self.list_sample
"""
data_in = self.list_sample
expected = '\x1b[34;01m_\x1b[39;00m = [];\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m] = \x1b[32m"string\\nwith newline\\ncharacters in it"\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m1\x1b[39m] = \x1b[90mtrue\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m2\x1b[39m] = \x1b[90mfalse\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m3\x1b[39m] = \x1b[90mnull\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m4\x1b[39m] = \x1b[35m42\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m5\x1b[39m] = \x1b[35m3.14\x1b[39m;'
output = self.schema.create_schema(data_in)
self.assertEqual(self.schema.color_output(output), expected)
def test_list_sample_m(self):
"""
Test self.list_sample -m
"""
data_in = self.list_sample
expected = '_ = [];\n_[0] = "string\\nwith newline\\ncharacters in it";\n_[1] = true;\n_[2] = false;\n_[3] = null;\n_[4] = 42;\n_[5] = 3.14;'
self.assertEqual(self.schema.create_schema(data_in), expected)
#
# Dicts in a list
#
def test_list_dict(self):
"""
Test self.list_of_dicts_sample
"""
data_in = self.list_of_dicts_sample
expected = '\x1b[34;01m_\x1b[39;00m = [];\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m] = {};\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m].\x1b[34;01mstring\x1b[39;00m = \x1b[32m"string\\nwith newline\\ncharacters in it"\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m].\x1b[90mtrue\x1b[39m = \x1b[90mtrue\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m].\x1b[90mfalse\x1b[39m = \x1b[90mfalse\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m].\x1b[90mnull\x1b[39m = \x1b[90mnull\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m].\x1b[90mint\x1b[39m = \x1b[35m42\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m].\x1b[90mfloat\x1b[39m = \x1b[35m3.14\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m].\x1b[34;01marray\x1b[39;00m = [];\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m].\x1b[34;01marray\x1b[39;00m[\x1b[35m0\x1b[39m] = \x1b[32m"string\\nwith newline\\ncharacters in it"\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m].\x1b[34;01marray\x1b[39;00m[\x1b[35m1\x1b[39m] = \x1b[90mtrue\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m].\x1b[34;01marray\x1b[39;00m[\x1b[35m2\x1b[39m] = \x1b[90mfalse\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m].\x1b[34;01marray\x1b[39;00m[\x1b[35m3\x1b[39m] = \x1b[90mnull\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m].\x1b[34;01marray\x1b[39;00m[\x1b[35m4\x1b[39m] = \x1b[35m42\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m].\x1b[34;01marray\x1b[39;00m[\x1b[35m5\x1b[39m] = \x1b[35m3.14\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m1\x1b[39m] = {};\n\x1b[34;01m_\x1b[39;00m[\x1b[35m1\x1b[39m].\x1b[34;01mstring\x1b[39;00m = \x1b[32m"another string\\nwith newline\\ncharacters in it"\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m1\x1b[39m].\x1b[90mtrue\x1b[39m = \x1b[90mtrue\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m1\x1b[39m].\x1b[90mfalse\x1b[39m = \x1b[90mfalse\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m1\x1b[39m].\x1b[90mnull\x1b[39m = \x1b[90mnull\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m1\x1b[39m].\x1b[90mint\x1b[39m = \x1b[35m10001\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m1\x1b[39m].\x1b[90mfloat\x1b[39m = -\x1b[35m400.45\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m1\x1b[39m].\x1b[34;01marray\x1b[39;00m = [];\n\x1b[34;01m_\x1b[39;00m[\x1b[35m1\x1b[39m].\x1b[34;01marray\x1b[39;00m[\x1b[35m0\x1b[39m] = \x1b[32m"string\\nwith newline\\ncharacters in it"\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m1\x1b[39m].\x1b[34;01marray\x1b[39;00m[\x1b[35m1\x1b[39m] = \x1b[90mtrue\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m1\x1b[39m].\x1b[34;01marray\x1b[39;00m[\x1b[35m2\x1b[39m] = \x1b[90mfalse\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m1\x1b[39m].\x1b[34;01marray\x1b[39;00m[\x1b[35m3\x1b[39m] = \x1b[90mnull\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m1\x1b[39m].\x1b[34;01marray\x1b[39;00m[\x1b[35m4\x1b[39m] = -\x1b[35m6000034\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m1\x1b[39m].\x1b[34;01marray\x1b[39;00m[\x1b[35m5\x1b[39m] = \x1b[35m999999.854321\x1b[39m;'
output = self.schema.create_schema(data_in)
self.assertEqual(self.schema.color_output(output), expected)
def test_list_dict_m(self):
"""
Test self.list_of_dicts_sample -m
"""
data_in = self.list_of_dicts_sample
expected = '_ = [];\n_[0] = {};\n_[0].string = "string\\nwith newline\\ncharacters in it";\n_[0].true = true;\n_[0].false = false;\n_[0].null = null;\n_[0].int = 42;\n_[0].float = 3.14;\n_[0].array = [];\n_[0].array[0] = "string\\nwith newline\\ncharacters in it";\n_[0].array[1] = true;\n_[0].array[2] = false;\n_[0].array[3] = null;\n_[0].array[4] = 42;\n_[0].array[5] = 3.14;\n_[1] = {};\n_[1].string = "another string\\nwith newline\\ncharacters in it";\n_[1].true = true;\n_[1].false = false;\n_[1].null = null;\n_[1].int = 10001;\n_[1].float = -400.45;\n_[1].array = [];\n_[1].array[0] = "string\\nwith newline\\ncharacters in it";\n_[1].array[1] = true;\n_[1].array[2] = false;\n_[1].array[3] = null;\n_[1].array[4] = -6000034;\n_[1].array[5] = 999999.854321;'
self.assertEqual(self.schema.create_schema(data_in), expected)
#
# lists in list
#
def test_list_list(self):
"""
Test self.list_of_lists_sample
"""
data_in = self.list_of_lists_sample
expected = '\x1b[34;01m_\x1b[39;00m = [];\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m] = [];\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m] = \x1b[32m"string\\nwith newline\\ncharacters in it"\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m][\x1b[35m1\x1b[39m] = \x1b[90mtrue\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m][\x1b[35m2\x1b[39m] = \x1b[90mfalse\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m][\x1b[35m3\x1b[39m] = \x1b[90mnull\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m][\x1b[35m4\x1b[39m] = \x1b[35m42\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m][\x1b[35m5\x1b[39m] = \x1b[35m3.14\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m1\x1b[39m] = [];\n\x1b[34;01m_\x1b[39;00m[\x1b[35m1\x1b[39m][\x1b[35m0\x1b[39m] = \x1b[32m"another string\\nwith newline\\ncharacters in it"\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m1\x1b[39m][\x1b[35m1\x1b[39m] = \x1b[90mtrue\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m1\x1b[39m][\x1b[35m2\x1b[39m] = \x1b[90mfalse\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m1\x1b[39m][\x1b[35m3\x1b[39m] = \x1b[90mnull\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m1\x1b[39m][\x1b[35m4\x1b[39m] = \x1b[35m42001\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m1\x1b[39m][\x1b[35m5\x1b[39m] = -\x1b[35m3.14\x1b[39m;'
output = self.schema.create_schema(data_in)
self.assertEqual(self.schema.color_output(output), expected)
def test_list_list_m(self):
"""
Test self.list_of_lists_sample -m
"""
data_in = self.list_of_lists_sample
expected = '_ = [];\n_[0] = [];\n_[0][0] = "string\\nwith newline\\ncharacters in it";\n_[0][1] = true;\n_[0][2] = false;\n_[0][3] = null;\n_[0][4] = 42;\n_[0][5] = 3.14;\n_[1] = [];\n_[1][0] = "another string\\nwith newline\\ncharacters in it";\n_[1][1] = true;\n_[1][2] = false;\n_[1][3] = null;\n_[1][4] = 42001;\n_[1][5] = -3.14;'
self.assertEqual(self.schema.create_schema(data_in), expected)
#
# deep nest
#
def test_dict_space_keys_nest(self):
"""
Test self.dict_space_keys_nest_sample
"""
data_in = self.dict_space_keys_nest_sample
expected = '\x1b[34;01m_\x1b[39;00m = {};\n\x1b[34;01m_\x1b[39;00m[\x1b[32m"string with spaces"\x1b[39m] = \x1b[32m"string\\nwith newline\\ncharacters in it"\x1b[39m;\n\x1b[34;01m_\x1b[39;00m.\x1b[34;01mfoo\x1b[39;00m = {};\n\x1b[34;01m_\x1b[39;00m.\x1b[34;01mfoo\x1b[39;00m[\x1b[32m"another with spaces"\x1b[39m] = {};\n\x1b[34;01m_\x1b[39;00m.\x1b[34;01mfoo\x1b[39;00m[\x1b[32m"another with spaces"\x1b[39m].\x1b[34;01mnested\x1b[39;00m = {};\n\x1b[34;01m_\x1b[39;00m.\x1b[34;01mfoo\x1b[39;00m[\x1b[32m"another with spaces"\x1b[39m].\x1b[34;01mnested\x1b[39;00m[\x1b[32m"nested space"\x1b[39m] = \x1b[90mtrue\x1b[39m;\n\x1b[34;01m_\x1b[39;00m.\x1b[90mtrue\x1b[39m = \x1b[90mtrue\x1b[39m;\n\x1b[34;01m_\x1b[39;00m.\x1b[90mfalse\x1b[39m = \x1b[90mfalse\x1b[39m;\n\x1b[34;01m_\x1b[39;00m.\x1b[90mnull\x1b[39m = \x1b[90mnull\x1b[39m;\n\x1b[34;01m_\x1b[39;00m.\x1b[90mint\x1b[39m = \x1b[35m42\x1b[39m;\n\x1b[34;01m_\x1b[39;00m.\x1b[90mfloat\x1b[39m = \x1b[35m3.14\x1b[39m;\n\x1b[34;01m_\x1b[39;00m.\x1b[34;01marray\x1b[39;00m = [];\n\x1b[34;01m_\x1b[39;00m.\x1b[34;01marray\x1b[39;00m[\x1b[35m0\x1b[39m] = \x1b[32m"string\\nwith newline\\ncharacters in it"\x1b[39m;\n\x1b[34;01m_\x1b[39;00m.\x1b[34;01marray\x1b[39;00m[\x1b[35m1\x1b[39m] = \x1b[90mtrue\x1b[39m;\n\x1b[34;01m_\x1b[39;00m.\x1b[34;01marray\x1b[39;00m[\x1b[35m2\x1b[39m] = \x1b[90mfalse\x1b[39m;\n\x1b[34;01m_\x1b[39;00m.\x1b[34;01marray\x1b[39;00m[\x1b[35m3\x1b[39m] = \x1b[90mnull\x1b[39m;\n\x1b[34;01m_\x1b[39;00m.\x1b[34;01marray\x1b[39;00m[\x1b[35m4\x1b[39m] = \x1b[35m42\x1b[39m;\n\x1b[34;01m_\x1b[39;00m.\x1b[34;01marray\x1b[39;00m[\x1b[35m5\x1b[39m] = \x1b[35m3.14\x1b[39m;'
output = self.schema.create_schema(data_in)
self.assertEqual(self.schema.color_output(output), expected)
def test_dict_space_keys_nest_m(self):
"""
Test self.dict_space_keys_nest_sample -m
"""
data_in = self.dict_space_keys_nest_sample
expected = '_ = {};\n_["string with spaces"] = "string\\nwith newline\\ncharacters in it";\n_.foo = {};\n_.foo["another with spaces"] = {};\n_.foo["another with spaces"].nested = {};\n_.foo["another with spaces"].nested["nested space"] = true;\n_.true = true;\n_.false = false;\n_.null = null;\n_.int = 42;\n_.float = 3.14;\n_.array = [];\n_.array[0] = "string\\nwith newline\\ncharacters in it";\n_.array[1] = true;\n_.array[2] = false;\n_.array[3] = null;\n_.array[4] = 42;\n_.array[5] = 3.14;'
self.assertEqual(self.schema.create_schema(data_in), expected)
def test_deep_nest(self):
"""
Test self.deep_nest_sample
"""
data_in = self.deep_nest_sample
expected = '\x1b[34;01m_\x1b[39;00m = [];\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m] = [];\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m] = [];\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m] = [];\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m] = {};\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m].\x1b[34;01mfoo\x1b[39;00m = [];\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m].\x1b[34;01mfoo\x1b[39;00m[\x1b[35m0\x1b[39m] = [];\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m].\x1b[34;01mfoo\x1b[39;00m[\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m] = [];\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m].\x1b[34;01mfoo\x1b[39;00m[\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m] = [];\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m].\x1b[34;01mfoo\x1b[39;00m[\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m] = \x1b[35m1\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m].\x1b[34;01mfoo\x1b[39;00m[\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m][\x1b[35m1\x1b[39m] = \x1b[35m2\x1b[39m;\n\x1b[34;01m_\x1b[39;00m[\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m].\x1b[34;01mfoo\x1b[39;00m[\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m][\x1b[35m0\x1b[39m][\x1b[35m2\x1b[39m] = \x1b[35m3\x1b[39m;'
output = self.schema.create_schema(data_in)
self.assertEqual(self.schema.color_output(output), expected)
def test_deep_nest_m(self):
"""
Test self.deep_nest_sample -m
"""
data_in = self.deep_nest_sample
expected = '_ = [];\n_[0] = [];\n_[0][0] = [];\n_[0][0][0] = [];\n_[0][0][0][0] = {};\n_[0][0][0][0].foo = [];\n_[0][0][0][0].foo[0] = [];\n_[0][0][0][0].foo[0][0] = [];\n_[0][0][0][0].foo[0][0][0] = [];\n_[0][0][0][0].foo[0][0][0][0] = 1;\n_[0][0][0][0].foo[0][0][0][1] = 2;\n_[0][0][0][0].foo[0][0][0][2] = 3;'
self.assertEqual(self.schema.create_schema(data_in), expected)
if __name__ == '__main__':
unittest.main()
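# A minimal usage sketch of the API these tests exercise, limited to the calls
# made above (jello.lib's Schema.create_schema with default opts); it is kept
# in comment form since unittest.main() above exits before anything after it
# would run. With default opts, create_schema emits the plain grammar seen in
# the *_m test variants, and color_output adds the ANSI codes checked elsewhere:
#
#     from jello.lib import Schema
#     schema = Schema()
#     schema.create_schema({'int': 42, 'array': [True, None]})
#     # -> '_ = {};\n_.int = 42;\n_.array = [];\n_.array[0] = true;\n_.array[1] = null;'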
| 53.977737
| 2,949
| 0.563175
| 4,355
| 29,094
| 3.633295
| 0.030999
| 0.104658
| 0.090501
| 0.10984
| 0.900651
| 0.883461
| 0.853568
| 0.834165
| 0.799406
| 0.744675
| 0
| 0.184415
| 0.243109
| 29,094
| 538
| 2,950
| 54.078067
| 0.534127
| 0.042655
| 0
| 0.531987
| 0
| 0.087542
| 0.614819
| 0.395755
| 0
| 0
| 0
| 0
| 0.127946
| 1
| 0.131313
| false
| 0
| 0.010101
| 0
| 0.144781
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b9ae05608088dbc70a214eb0a7f5df2bdb7907eb
| 15,500
|
py
|
Python
|
gcloud/tests/tasktempl3/test_api.py
|
SHUN-YI/bk-sops
|
a4a841bdc44a18518c6c53c04a02996ddc7da2be
|
[
"Apache-2.0"
] | 2
|
2019-08-15T10:06:26.000Z
|
2019-09-17T11:49:20.000Z
|
gcloud/tests/tasktempl3/test_api.py
|
SHUN-YI/bk-sops
|
a4a841bdc44a18518c6c53c04a02996ddc7da2be
|
[
"Apache-2.0"
] | null | null | null |
gcloud/tests/tasktempl3/test_api.py
|
SHUN-YI/bk-sops
|
a4a841bdc44a18518c6c53c04a02996ddc7da2be
|
[
"Apache-2.0"
] | 1
|
2020-07-03T06:45:07.000Z
|
2020-07-03T06:45:07.000Z
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
from __future__ import absolute_import
from django.test import TestCase
from gcloud.tasktmpl3 import api
class ApiTestCase(TestCase):
def test_replace_job_relate_id_in_templates_data(self):
# for template contain job_var constants
assert_data = {u'activities': {
u'node7ee2048214dbf045e0043fe1f62b': {
u'component': {
u'code': u'job_execute_task',
u'data': {
u'job_global_var': {u'hook': False, u'value': []},
u'job_task_id': {u'hook': False,
u'value': 82401}}},
u'type': u'ServiceActivity'},
u'node51f27cbf5d61d50b36d1b25b58ed': {
u'component': {
u'code': u'job_execute_task', u'data': {
u'job_global_var': {
u'hook': False,
u'value': [{
u'value': u'0:1.1.1.1,0:2.2.2.2,0:3.3.3.3',
u'type': 2,
u'description': u'',
u'name': u'id-2018112023375235',
u'id': 71821},
{
u'value': u'a"bc\'cb\'a',
u'type': 1,
u'description': u'',
u'name': u'str1',
u'id': 71831},
{
u'value': u'ctx1',
u'type': 1,
u'description': u'ctx1',
u'name': u'ctx1',
u'id': 71841},
{
u'value': u'(a b c)',
u'type': 3,
u'description': u'',
u'name': u'aa',
u'id': 71851},
{
u'value': u'([A]=a [B]=b [C]=c)',
u'type': 4,
u'description': u'',
u'name': u'bb',
u'id': 71861},
{
u'value': u'const_str',
u'type': 1,
u'description': u'const_str',
u'name': u'const_str',
u'id': 71901},
{
u'value': u"<script>alert1('xss')</script>",
u'type': 1,
u'description': u'',
u'name': u'str2',
u'id': 72011}]},
u'job_task_id': {
u'hook': False,
u'value': 82251}}},
u'type': u'ServiceActivity', },
u'node10dfa52df286c92c180398a78a5f': {
u'component': {
u'code': u'job_execute_task', u'data': {
u'job_global_var': {u'hook': True,
u'value': u'${job_global_var}'},
u'job_task_id': {u'hook': False, u'value': 10001}}},
u'type': u'ServiceActivity'}},
u'constants': {
u'${job_global_var}': {u'value': [
{u'value': u'5', u'type': 1,
u'name': u'SECONDS', u'id': 11},
{u'value': u'0', u'type': 1,
u'name': u'EXIT', u'id': 21},
{u'value': u'0:1.1.1.1,0:1.1.1.12,0:1.1.1.17', u'type': 2,
u'description': u'wewew',
u'name': u'id-201868163412877', u'id': 31}]}}}
template_data = {'pipeline_template_data': {
'template': {
'1': {'tree': {u'activities': {
u'node7ee2048214dbf045e0043fe1f62b': {
u'component': {u'code': u'job_execute_task', u'data': {
u'job_global_var': {u'hook': False, u'value': []},
u'job_task_id': {u'hook': False, u'value': 8240}}},
u'type': u'ServiceActivity'},
u'node51f27cbf5d61d50b36d1b25b58ed': {u'component': {u'code': u'job_execute_task', u'data': {
u'job_global_var': {u'hook': False, u'value': [{
u'value': u'0:1.1.1.1,0:2.2.2.2,0:3.3.3.3',
u'type': 2,
u'description': u'',
u'name': u'id-2018112023375235',
u'id': 7182},
{
u'value': u'a"bc\'cb\'a',
u'type': 1,
u'description': u'',
u'name': u'str1',
u'id': 7183},
{
u'value': u'ctx1',
u'type': 1,
u'description': u'ctx1',
u'name': u'ctx1',
u'id': 7184},
{
u'value': u'(a b c)',
u'type': 3,
u'description': u'',
u'name': u'aa',
u'id': 7185},
{
u'value': u'([A]=a [B]=b [C]=c)',
u'type': 4,
u'description': u'',
u'name': u'bb',
u'id': 7186},
{
u'value': u'const_str',
u'type': 1,
u'description': u'const_str',
u'name': u'const_str',
u'id': 7190},
{
u'value': u"<script>alert1('xss')</script>",
u'type': 1,
u'description': u'',
u'name': u'str2',
u'id': 7201}]},
u'job_task_id': {u'hook': False, u'value': 8225}}}, u'type': u'ServiceActivity', },
u'node10dfa52df286c92c180398a78a5f': {
u'component': {
u'code': u'job_execute_task', u'data': {
u'job_global_var': {u'hook': True,
u'value': u'${job_global_var}'},
u'job_task_id': {u'hook': False, u'value': 1000}}},
u'type': u'ServiceActivity'}}, u'constants': {
u'${job_global_var}': {
u'value': [
{u'value': u'5', u'type': 1,
u'name': u'SECONDS', u'id': 1},
{u'value': u'0', u'type': 1,
u'name': u'EXIT', u'id': 2},
{u'value': u'0:1.1.1.1,0:1.1.1.12,0:1.1.1.17', u'type': 2,
u'description': u'wewew',
u'name': u'id-201868163412877', u'id': 3}]}}}}
}
}}
job_id_map = {1000: {'id': 10001, 'var_id_map': {1: 11, 2: 21, 3: 31}},
8225: {'id': 82251,
'var_id_map': {7182: 71821,
7183: 71831,
7184: 71841,
7185: 71851,
7186: 71861,
7190: 71901,
7201: 72011}},
8240: {'id': 82401, 'var_id_map': {}}}
api.replace_job_relate_id_in_templates_data(job_id_map, template_data)
self.assertEqual(template_data['pipeline_template_data']['template']['1']['tree'], assert_data)
# for template do not have job atom
assert_data = {u'activities': {
u'node7ee2048214dbf045e0043fe1f62b': {
u'component': {u'code': u'not_a_job', u'data': {
u'job_global_var': {u'hook': False, u'value': []},
u'job_task_id': {u'hook': False, u'value': 82401}}},
u'type': u'ServiceActivity'},
u'node51f27cbf5d61d50b36d1b25b58ed': {u'component': {u'code': u'not_a_job', u'data': {}},
u'type': u'ServiceActivity'},
u'node10dfa52df286c92c180398a78a5f': {u'component': {u'code': u'not_a_job', u'data': {
u'job_global_var': {u'hook': True,
u'value': u'${job_global_var}'},
u'job_task_id': {u'hook': False, u'value': 10001}}}, u'type': u'ServiceActivity'}}, u'constants': {}}
template_data = {'pipeline_template_data': {
'template': {
'1': {'tree': {u'activities': {
u'node7ee2048214dbf045e0043fe1f62b': {
u'component': {u'code': u'not_a_job', u'data': {
u'job_global_var': {u'hook': False, u'value': []},
u'job_task_id': {u'hook': False, u'value': 82401}}},
u'type': u'ServiceActivity'},
u'node51f27cbf5d61d50b36d1b25b58ed': {u'component': {u'code': u'not_a_job', u'data': {}},
u'type': u'ServiceActivity'},
u'node10dfa52df286c92c180398a78a5f': {u'component': {u'code': u'not_a_job', u'data': {
u'job_global_var': {u'hook': True,
u'value': u'${job_global_var}'},
u'job_task_id': {u'hook': False, u'value': 10001}}}, u'type': u'ServiceActivity'}},
u'constants': {}}}
}
}}
job_id_map = {1000: {'id': 10001, 'var_id_map': {1: 11, 2: 21, 3: 31}},
8225: {'id': 82251,
'var_id_map': {7182: 71821,
7183: 71831,
7184: 71841,
7185: 71851,
7186: 71861,
7190: 71901,
7201: 72011}},
8240: {'id': 82401, 'var_id_map': {}}}
api.replace_job_relate_id_in_templates_data(job_id_map, template_data)
self.assertEqual(template_data['pipeline_template_data']['template']['1']['tree'], assert_data)
def test_job_id_map_convert(self):
# empty case
job_id_maps = []
result = api.job_id_map_convert(job_id_maps)
self.assertEqual(result, {})
# one map case
job_id_maps = [
{
"original_job_id": 1,
"new_job_id": 5,
"original_job_name": "test",
"new_job_name": "test_import20190120212225",
"step_id_mapping": [
],
"global_var_id_mapping": [
{
"original_id": 1,
"new_id": 5
},
{
"original_id": 10,
"new_id": 50
}
]
}
]
result = api.job_id_map_convert(job_id_maps)
self.assertEqual(result, {1: {'id': 5,
api.VAR_ID_MAP: {1: 5,
10: 50}}})
# multiple map case
job_id_maps = [
{
"original_job_id": 1,
"new_job_id": 2,
"original_job_name": "test",
"new_job_name": "test_import20190120212225",
"step_id_mapping": [],
"global_var_id_mapping": [
{
"original_id": 1,
"new_id": 5
},
{
"original_id": 10,
"new_id": 50
}
]},
{
"original_job_id": 3,
"new_job_id": 4,
"original_job_name": "test",
"new_job_name": "test_import20190120212225",
"step_id_mapping": [],
"global_var_id_mapping": [
{
"original_id": 2,
"new_id": 10
},
{
"original_id": 10,
"new_id": 50
}]},
{
"original_job_id": 5,
"new_job_id": 6,
"original_job_name": "test",
"new_job_name": "test_import20190120212225",
"step_id_mapping": [],
"global_var_id_mapping": [
{
"original_id": 3,
"new_id": 20
},
{
"original_id": 10,
"new_id": 50
}]}
]
result = api.job_id_map_convert(job_id_maps)
self.assertEqual(result, {1: {'id': 2,
api.VAR_ID_MAP: {1: 5,
10: 50}},
3: {'id': 4,
api.VAR_ID_MAP: {10: 50,
2: 10}},
5: {'id': 6,
api.VAR_ID_MAP: {10: 50,
3: 20}}})
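# The contract pinned down by test_job_id_map_convert above: each input entry
# collapses to original_job_id -> {'id': new_job_id, api.VAR_ID_MAP: {...}},
# where api.VAR_ID_MAP appears to be the literal 'var_id_map' key used in the
# job_id_map fixtures earlier in this file. A hedged re-statement of that
# mapping (not the project's implementation):
def job_id_map_convert_sketch(job_id_maps):
    return {
        m['original_job_id']: {
            'id': m['new_job_id'],
            'var_id_map': {v['original_id']: v['new_id']
                           for v in m['global_var_id_mapping']},
        }
        for m in job_id_maps
    }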
| 47.546012
| 305
| 0.351032
| 1,407
| 15,500
| 3.68941
| 0.140725
| 0.048546
| 0.043152
| 0.040069
| 0.815065
| 0.815065
| 0.809285
| 0.802928
| 0.796378
| 0.777692
| 0
| 0.112395
| 0.52529
| 15,500
| 325
| 306
| 47.692308
| 0.593096
| 0.054323
| 0
| 0.610345
| 0
| 0.013793
| 0.228626
| 0.057088
| 0
| 0
| 0
| 0
| 0.024138
| 1
| 0.006897
| false
| 0
| 0.024138
| 0
| 0.034483
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6a0bb35174ccc64cb3067d3a83b983af091971b0
| 9,726
|
py
|
Python
|
sonic-psud/tests/mock_platform.py
|
praveen-li/sonic-platform-daemons
|
4be43068c22b6fb815eb3a53bdb5f3b476ec6456
|
[
"Apache-2.0"
] | null | null | null |
sonic-psud/tests/mock_platform.py
|
praveen-li/sonic-platform-daemons
|
4be43068c22b6fb815eb3a53bdb5f3b476ec6456
|
[
"Apache-2.0"
] | 2
|
2019-08-01T06:55:16.000Z
|
2022-03-24T03:55:42.000Z
|
sonic-psud/tests/mock_platform.py
|
robocoder99/sonic-platform-daemons
|
911601dfb17403f01bb2340f97b69ad57c2c2cb3
|
[
"Apache-2.0"
] | null | null | null |
from sonic_platform_base import chassis_base
from sonic_platform_base import fan_base
from sonic_platform_base import fan_drawer_base
from sonic_platform_base import module_base
from sonic_platform_base import psu_base
class MockChassis(chassis_base.ChassisBase):
def __init__(self,
name='Fixed Chassis',
position_in_parent=0,
presence=True,
model='Module Model',
serial='Module Serial',
status=True):
super(MockChassis, self).__init__()
self._name = name
self._presence = presence
self._model = model
self._serial = serial
self._status = status
self._position_in_parent = position_in_parent
self._psu_list = []
self._fan_drawer_list = []
self._module_list = []
def get_num_psus(self):
return len(self._psu_list)
def get_all_psus(self):
return self._psu_list
def get_psu(self, index):
return self._psu_list[index]
def get_num_fan_drawers(self):
return len(self._fan_drawer_list)
def get_all_fan_drawers(self):
return self._fan_drawer_list
def get_num_modules(self):
return len(self._module_list)
def get_all_modules(self):
return self._module_list
def get_status_led(self):
return self._status_led_color
def set_status_led(self, color):
self._status_led_color = color
return True
# Methods inherited from DeviceBase class and related setters
def get_name(self):
return self._name
def get_presence(self):
return self._presence
def set_presence(self, presence):
self._presence = presence
def get_model(self):
return self._model
def get_serial(self):
return self._serial
def get_status(self):
return self._status
def set_status(self, status):
self._status = status
def get_position_in_parent(self):
return self._position_in_parent
def is_replaceable(self):
return self._replaceable
class MockFan(fan_base.FanBase):
def __init__(self,
name,
position_in_parent,
presence=True,
model='Module Model',
serial='Module Serial',
status=True,
direction=fan_base.FanBase.FAN_DIRECTION_INTAKE,
speed=50):
super(MockFan, self).__init__()
self._name = name
self._presence = presence
self._model = model
self._serial = serial
self._status = status
self._position_in_parent = position_in_parent
self._direction = direction
self._speed = speed
self._status_led_color = self.STATUS_LED_COLOR_OFF
def get_direction(self):
return self._direction
def get_speed(self):
return self._speed
def get_status_led(self):
return self._status_led_color
def set_status_led(self, color):
self._status_led_color = color
return True
# Methods inherited from DeviceBase class and related setters
def get_name(self):
return self._name
def get_presence(self):
return self._presence
def set_presence(self, presence):
self._presence = presence
def get_model(self):
return self._model
def get_serial(self):
return self._serial
def get_status(self):
return self._status
def set_status(self, status):
self._status = status
def get_position_in_parent(self):
return self._position_in_parent
def is_replaceable(self):
return self._replaceable
class MockFanDrawer(fan_drawer_base.FanDrawerBase):
def __init__(self,
name,
position_in_parent,
presence=True,
model='Module Model',
serial='Module Serial',
status=True):
super(MockFanDrawer, self).__init__()
self._name = name
self._presence = presence
self._model = model
self._serial = serial
self._status = status
self._position_in_parent = position_in_parent
self._max_consumed_power = 500.0
self._status_led_color = self.STATUS_LED_COLOR_OFF
def get_maximum_consumed_power(self):
return self._max_consumed_power
def set_maximum_consumed_power(self, consumed_power):
self._max_consumed_power = consumed_power
def get_status_led(self):
return self._status_led_color
def set_status_led(self, color):
self._status_led_color = color
return True
# Methods inherited from DeviceBase class and related setters
def get_name(self):
return self._name
def get_presence(self):
return self._presence
def set_presence(self, presence):
self._presence = presence
def get_model(self):
return self._model
def get_serial(self):
return self._serial
def get_status(self):
return self._status
def set_status(self, status):
self._status = status
def get_position_in_parent(self):
return self._position_in_parent
def is_replaceable(self):
return self._replaceable
class MockModule(module_base.ModuleBase):
def __init__(self,
name,
position_in_parent,
presence=True,
model='Module Model',
serial='Module Serial',
status=True):
super(MockModule, self).__init__()
self._name = name
self._presence = presence
self._model = model
self._serial = serial
self._status = status
self._position_in_parent = position_in_parent
self._max_consumed_power = 500.0
def set_maximum_consumed_power(self, consumed_power):
self._max_consumed_power = consumed_power
def get_maximum_consumed_power(self):
return self._max_consumed_power
# Methods inherited from DeviceBase class and related setters
def get_name(self):
return self._name
def get_presence(self):
return self._presence
def set_presence(self, presence):
self._presence = presence
def get_model(self):
return self._model
def get_serial(self):
return self._serial
def get_status(self):
return self._status
def set_status(self, status):
self._status = status
def get_position_in_parent(self):
return self._position_in_parent
def is_replaceable(self):
return self._replaceable
class MockPsu(psu_base.PsuBase):
def __init__(self,
name,
position_in_parent,
presence=True,
model='Module Model',
serial='Module Serial',
status=True,
voltage=12.0,
current=8.0,
power=100.0,
temp=30.00,
temp_high_th=50.0,
voltage_low_th=11.0,
voltage_high_th=13.0,
replaceable=True):
super(MockPsu, self).__init__()
self._name = name
self._presence = presence
self._model = model
self._serial = serial
self._status = status
self._position_in_parent = position_in_parent
self._replaceable = replaceable
self._voltage = voltage
self._current = current
self._power = power
self._temp = temp
self._temp_high_th = temp_high_th
self._voltage_low_th = voltage_low_th
self._voltage_high_th = voltage_high_th
self._status_led_color = self.STATUS_LED_COLOR_OFF
def get_voltage(self):
return self._voltage
def set_voltage(self, voltage):
self._voltage = voltage
def get_current(self):
return self._current
def set_current(self, current):
self._current = current
def get_power(self):
return self._power
def set_power(self, power):
self._power = power
def get_powergood_status(self):
return self._status
def get_temperature(self):
return self._temp
    def set_temperature(self, temp):
        self._temp = temp
def get_temperature_high_threshold(self):
return self._temp_high_th
def get_voltage_high_threshold(self):
return self._voltage_high_th
def get_voltage_low_threshold(self):
return self._voltage_low_th
def get_maximum_supplied_power(self):
return self._max_supplied_power
def set_maximum_supplied_power(self, supplied_power):
self._max_supplied_power = supplied_power
def get_status_led(self):
return self._status_led_color
def set_status_led(self, color):
self._status_led_color = color
return True
# Methods inherited from DeviceBase class and related setters
def get_name(self):
return self._name
def get_presence(self):
return self._presence
def set_presence(self, presence):
self._presence = presence
def get_model(self):
return self._model
def get_serial(self):
return self._serial
def get_status(self):
return self._status
def set_status(self, status):
self._status = status
def get_position_in_parent(self):
return self._position_in_parent
def is_replaceable(self):
return self._replaceable
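# A minimal sketch of how these mocks compose in a test: build a chassis,
# attach a PSU through the _psu_list the constructor creates, and read it back
# through the ChassisBase accessors defined above. Only names defined in this
# module are used; the daemon under test is not assumed here.
def _example_usage():
    chassis = MockChassis()
    psu = MockPsu('PSU 1', 0)
    chassis._psu_list.append(psu)
    assert chassis.get_num_psus() == 1
    assert chassis.get_psu(0).get_voltage() == 12.0  # MockPsu default voltage
    psu.set_voltage(11.5)
    assert chassis.get_psu(0).get_voltage() == 11.5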
| 25.662269
| 65
| 0.630989
| 1,162
| 9,726
| 4.892427
| 0.072289
| 0.103782
| 0.137907
| 0.044327
| 0.770976
| 0.724186
| 0.700088
| 0.688127
| 0.688127
| 0.688127
| 0
| 0.004854
| 0.301049
| 9,726
| 378
| 66
| 25.730159
| 0.831421
| 0.030742
| 0
| 0.721014
| 0
| 0
| 0.014648
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.315217
| false
| 0
| 0.018116
| 0.217391
| 0.583333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
6a320adc325afdfdf4dc43827c0c2855f448113f
| 2,433
|
py
|
Python
|
Utils/NetParamsFactory.py
|
AndresOtero/TensorDecompositionMachineLearning
|
455f16b405ec9d031999b0ebf9c5a68d3c20b233
|
[
"MIT"
] | 3
|
2021-06-11T02:46:06.000Z
|
2021-08-17T02:59:30.000Z
|
Utils/NetParamsFactory.py
|
AndresOtero/TensorDecompositionMachineLearning
|
455f16b405ec9d031999b0ebf9c5a68d3c20b233
|
[
"MIT"
] | null | null | null |
Utils/NetParamsFactory.py
|
AndresOtero/TensorDecompositionMachineLearning
|
455f16b405ec9d031999b0ebf9c5a68d3c20b233
|
[
"MIT"
] | null | null | null |
from Utils import Constant, EnumDataset
from Utils.NetParams import NetParams
class NetParamsFactory:
@staticmethod
def create_mnist_net_params(model, learning_rate, optimizer, cuda_is_available, m, rank,batch_size):
net_params = NetParams(model, EnumDataset.MNIST_FLAT_DIVISIONS, learning_rate, optimizer, cuda_is_available,
m=m, rank=rank, tensor_size=Constant.TENSOR_SIZE_MNIST,batch_size=batch_size)
return net_params
@staticmethod
def create_cifar_net_params(model, learning_rate, optimizer, cuda_is_available, m, rank,batch_size):
net_params = NetParams(model, EnumDataset.CIFAR_FLAT_DIVISIONS, learning_rate, optimizer, cuda_is_available,
m=m, rank=rank, tensor_size=Constant.TENSOR_SIZE_CIFAR,
divides_in_row=Constant.DIVIDES_IN_ROW_CIFAR,batch_size=batch_size)
return net_params
@staticmethod
def create_cifar_parallel_net_params(model, learning_rate, optimizer, cuda_is_available, m, rank,batch_size):
        net_params = NetParams(model, EnumDataset.CIFAR_PARALLEL_DIVISIONS, learning_rate, optimizer, cuda_is_available,
m=m, rank=rank, tensor_size=Constant.TENSOR_SIZE_CIFAR_PARALLEL,batch_size=batch_size)
return net_params
@staticmethod
def create_fashion_mnist_net_params(model, learning_rate, optimizer, cuda_is_available, m, rank,batch_size):
net_params = NetParams(model, EnumDataset.FASHION_MNIST_FLAT_DIVISIONS, learning_rate, optimizer,
cuda_is_available,m=m, rank=rank, tensor_size=Constant.TENSOR_SIZE_MNIST,batch_size=batch_size)
return net_params
@staticmethod
def create_kuzushiji_mnist_net_params(model, learning_rate, optimizer, cuda_is_available, m, rank,batch_size):
net_params = NetParams(model, EnumDataset.KUZUSHIJI_MNIST_FLAT_DIVISIONS, learning_rate, optimizer,
cuda_is_available,m=m, rank=rank, tensor_size=Constant.TENSOR_SIZE_MNIST,batch_size=batch_size)
return net_params
@staticmethod
def create_IMDB_net_params(model, learning_rate, optimizer, cuda_is_available, m, rank,batch_size):
net_params = NetParams(model, EnumDataset.IMDB, learning_rate, optimizer,
cuda_is_available,m=m, rank=rank,batch_size=batch_size)
return net_params
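# A hedged usage sketch: model and optimizer are whatever objects the
# surrounding project passes in, so None stands in for them here, and the
# numeric values are illustrative rather than defaults taken from the project.
if __name__ == '__main__':
    params = NetParamsFactory.create_mnist_net_params(
        model=None, learning_rate=0.001, optimizer=None,
        cuda_is_available=False, m=2, rank=4, batch_size=64)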
| 57.928571
| 126
| 0.72873
| 306
| 2,433
| 5.401961
| 0.120915
| 0.098004
| 0.108893
| 0.174229
| 0.84755
| 0.84755
| 0.84755
| 0.827586
| 0.827586
| 0.827586
| 0
| 0
| 0.203453
| 2,433
| 41
| 127
| 59.341463
| 0.852941
| 0
| 0
| 0.411765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.176471
| false
| 0
| 0.058824
| 0
| 0.441176
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e005148dd2e10593ed89f233b94e2217d383a130
| 72,873
|
py
|
Python
|
heat/tests/api/cfn/test_api_cfn_v1.py
|
itachaaa/heat
|
a73fa991dbd93fbcfd93c3d19e74f1e8b5a0b870
|
[
"Apache-2.0"
] | null | null | null |
heat/tests/api/cfn/test_api_cfn_v1.py
|
itachaaa/heat
|
a73fa991dbd93fbcfd93c3d19e74f1e8b5a0b870
|
[
"Apache-2.0"
] | null | null | null |
heat/tests/api/cfn/test_api_cfn_v1.py
|
itachaaa/heat
|
a73fa991dbd93fbcfd93c3d19e74f1e8b5a0b870
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import mock
from oslo_config import fixture as config_fixture
import six
from heat.api.aws import exception
import heat.api.cfn.v1.stacks as stacks
from heat.common import exception as heat_exception
from heat.common import identifier
from heat.common import policy
from heat.common import wsgi
from heat.rpc import api as rpc_api
from heat.rpc import client as rpc_client
from heat.tests import common
from heat.tests import utils
policy_path = os.path.dirname(os.path.realpath(__file__)) + "/../../policy/"
class CfnStackControllerTest(common.HeatTestCase):
"""Tests the API class CfnStackController.
Tests the API class which acts as the WSGI controller,
the endpoint processing API requests after they are routed
"""
def setUp(self):
super(CfnStackControllerTest, self).setUp()
self.fixture = self.useFixture(config_fixture.Config())
self.fixture.conf(args=['--config-dir', policy_path])
self.topic = rpc_api.ENGINE_TOPIC
self.api_version = '1.0'
self.template = {u'AWSTemplateFormatVersion': u'2010-09-09',
u'Foo': u'bar'}
# Create WSGI controller instance
class DummyConfig(object):
bind_port = 8000
cfgopts = DummyConfig()
self.controller = stacks.StackController(options=cfgopts)
self.controller.policy.enforcer.policy_path = (policy_path +
'deny_stack_user.json')
self.addCleanup(self.m.VerifyAll)
def test_default(self):
self.assertRaises(
exception.HeatInvalidActionError, self.controller.default, None)
def _dummy_GET_request(self, params=None):
# Mangle the params dict into a query string
params = params or {}
qs = "&".join(["=".join([k, str(params[k])]) for k in params])
environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': qs}
req = wsgi.Request(environ)
req.context = utils.dummy_context()
return req
def _stub_enforce(self, req, action, allowed=True):
mock_enforce = self.patchobject(policy.Enforcer, 'enforce')
if allowed:
mock_enforce.return_value = True
else:
mock_enforce.side_effect = heat_exception.Forbidden
# The tests
def test_stackid_addprefix(self):
self.m.ReplayAll()
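        # _id_format collapses the identity dict into an OpenStack ARN of
        # the form arn:openstack:heat::<tenant>:stacks/<stack_name>/<stack_id>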
response = self.controller._id_format({
'StackName': 'Foo',
'StackId': {
u'tenant': u't',
u'stack_name': u'Foo',
u'stack_id': u'123',
u'path': u''
}
})
expected = {'StackName': 'Foo',
'StackId': 'arn:openstack:heat::t:stacks/Foo/123'}
self.assertEqual(expected, response)
def test_enforce_ok(self):
params = {'Action': 'ListStacks'}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'ListStacks')
response = self.controller._enforce(dummy_req, 'ListStacks')
self.assertIsNone(response)
def test_enforce_denied(self):
self.m.ReplayAll()
params = {'Action': 'ListStacks'}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'ListStacks', False)
self.assertRaises(exception.HeatAccessDeniedError,
self.controller._enforce, dummy_req, 'ListStacks')
def test_enforce_ise(self):
params = {'Action': 'ListStacks'}
dummy_req = self._dummy_GET_request(params)
dummy_req.context.roles = ['heat_stack_user']
mock_enforce = self.patchobject(policy.Enforcer, 'enforce')
mock_enforce.side_effect = AttributeError
self.assertRaises(exception.HeatInternalFailureError,
self.controller._enforce, dummy_req, 'ListStacks')
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_list(self, mock_call):
# Format a dummy GET request to pass into the WSGI handler
params = {'Action': 'ListStacks'}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'ListStacks')
# Stub out the RPC call to the engine with a pre-canned response
engine_resp = [{u'stack_identity': {u'tenant': u't',
u'stack_name': u'wordpress',
u'stack_id': u'1',
u'path': u''},
u'updated_time': u'2012-07-09T09:13:11Z',
u'template_description': u'blah',
u'stack_status_reason': u'Stack successfully created',
u'creation_time': u'2012-07-09T09:12:45Z',
u'stack_name': u'wordpress',
u'stack_action': u'CREATE',
u'stack_status': u'COMPLETE'}]
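        # The controller translates engine keys into CFN-style keys, e.g.
        # stack_action 'CREATE' and stack_status 'COMPLETE' are joined into
        # the single AWS-style value 'CREATE_COMPLETE' below.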
mock_call.return_value = engine_resp
# Call the list controller function and compare the response
result = self.controller.list(dummy_req)
expected = {'ListStacksResponse': {'ListStacksResult':
{'StackSummaries':
[{u'StackId': u'arn:openstack:heat::t:stacks/wordpress/1',
u'LastUpdatedTime': u'2012-07-09T09:13:11Z',
u'TemplateDescription': u'blah',
u'StackStatusReason': u'Stack successfully created',
u'CreationTime': u'2012-07-09T09:12:45Z',
u'StackName': u'wordpress',
u'StackStatus': u'CREATE_COMPLETE'}]}}}
self.assertEqual(expected, result)
default_args = {'limit': None, 'sort_keys': None, 'marker': None,
'sort_dir': None, 'filters': None,
'show_deleted': False, 'show_nested': False,
'show_hidden': False, 'tags': None,
'tags_any': None, 'not_tags': None,
'not_tags_any': None}
mock_call.assert_called_once_with(
dummy_req.context, ('list_stacks', default_args), version='1.33')
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_list_rmt_aterr(self, mock_call):
params = {'Action': 'ListStacks'}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'ListStacks')
# Insert an engine RPC error and ensure we map correctly to the
# heat exception type
mock_call.side_effect = AttributeError
# Call the list controller function and compare the response
result = self.controller.list(dummy_req)
self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
mock_call.assert_called_once_with(
dummy_req.context, ('list_stacks', mock.ANY), version='1.33')
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_list_rmt_interr(self, mock_call):
params = {'Action': 'ListStacks'}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'ListStacks')
# Insert an engine RPC error and ensure we map correctly to the
# heat exception type
mock_call.side_effect = Exception()
# Call the list controller function and compare the response
result = self.controller.list(dummy_req)
self.assertIsInstance(result, exception.HeatInternalFailureError)
mock_call.assert_called_once_with(
dummy_req.context, ('list_stacks', mock.ANY), version='1.33')
def test_describe_last_updated_time(self):
params = {'Action': 'DescribeStacks'}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'DescribeStacks')
engine_resp = [{u'updated_time': '1970-01-01',
u'parameters': {},
u'stack_action': u'CREATE',
u'stack_status': u'COMPLETE'}]
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
dummy_req.context, ('show_stack', {'stack_identity': None,
'resolve_outputs': True}),
version='1.20'
).AndReturn(engine_resp)
self.m.ReplayAll()
response = self.controller.describe(dummy_req)
result = response['DescribeStacksResponse']['DescribeStacksResult']
stack = result['Stacks'][0]
self.assertEqual('1970-01-01', stack['LastUpdatedTime'])
def test_describe_no_last_updated_time(self):
params = {'Action': 'DescribeStacks'}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'DescribeStacks')
engine_resp = [{u'updated_time': None,
u'parameters': {},
u'stack_action': u'CREATE',
u'stack_status': u'COMPLETE'}]
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
dummy_req.context, ('show_stack', {'stack_identity': None,
'resolve_outputs': True}),
version='1.20'
).AndReturn(engine_resp)
self.m.ReplayAll()
response = self.controller.describe(dummy_req)
result = response['DescribeStacksResponse']['DescribeStacksResult']
stack = result['Stacks'][0]
self.assertNotIn('LastUpdatedTime', stack)
def test_describe(self):
# Format a dummy GET request to pass into the WSGI handler
stack_name = u"wordpress"
identity = dict(identifier.HeatIdentifier('t', stack_name, '6'))
params = {'Action': 'DescribeStacks', 'StackName': stack_name}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'DescribeStacks')
# Stub out the RPC call to the engine with a pre-canned response
# Note the engine returns a load of keys we don't actually use
# so this is a subset of the real response format
engine_resp = [{u'stack_identity':
{u'tenant': u't',
u'stack_name': u'wordpress',
u'stack_id': u'6',
u'path': u''},
u'updated_time': u'2012-07-09T09:13:11Z',
u'parameters': {u'DBUsername': u'admin',
u'LinuxDistribution': u'F17',
u'InstanceType': u'm1.large',
u'DBRootPassword': u'admin',
u'DBPassword': u'admin',
u'DBName': u'wordpress'},
u'outputs':
[{u'output_key': u'WebsiteURL',
u'description': u'URL for Wordpress wiki',
u'output_value': u'http://10.0.0.8/wordpress'}],
u'stack_status_reason': u'Stack successfully created',
u'creation_time': u'2012-07-09T09:12:45Z',
u'stack_name': u'wordpress',
u'notification_topics': [],
u'stack_action': u'CREATE',
u'stack_status': u'COMPLETE',
u'description': u'blah',
u'disable_rollback': 'true',
                        u'timeout_mins': 60,
                        u'capabilities': []}]
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
dummy_req.context,
('identify_stack', {'stack_name': stack_name})
).AndReturn(identity)
rpc_client.EngineClient.call(
dummy_req.context,
('show_stack', {'stack_identity': identity,
'resolve_outputs': True}),
version='1.20'
).AndReturn(engine_resp)
self.m.ReplayAll()
# Call the list controller function and compare the response
response = self.controller.describe(dummy_req)
expected = {'DescribeStacksResponse':
{'DescribeStacksResult':
{'Stacks':
[{'StackId': u'arn:openstack:heat::t:stacks/wordpress/6',
'StackStatusReason': u'Stack successfully created',
'Description': u'blah',
'Parameters':
[{'ParameterValue': u'wordpress',
'ParameterKey': u'DBName'},
{'ParameterValue': u'admin',
'ParameterKey': u'DBPassword'},
{'ParameterValue': u'admin',
'ParameterKey': u'DBRootPassword'},
{'ParameterValue': u'admin',
'ParameterKey': u'DBUsername'},
{'ParameterValue': u'm1.large',
'ParameterKey': u'InstanceType'},
{'ParameterValue': u'F17',
'ParameterKey': u'LinuxDistribution'}],
'Outputs':
[{'OutputKey': u'WebsiteURL',
'OutputValue': u'http://10.0.0.8/wordpress',
'Description': u'URL for Wordpress wiki'}],
'TimeoutInMinutes': 60,
'CreationTime': u'2012-07-09T09:12:45Z',
'Capabilities': [],
'StackName': u'wordpress',
'NotificationARNs': [],
'StackStatus': u'CREATE_COMPLETE',
'DisableRollback': 'true',
'LastUpdatedTime': u'2012-07-09T09:13:11Z'}]}}}
stacks = (response['DescribeStacksResponse']['DescribeStacksResult']
['Stacks'])
stacks[0]['Parameters'] = sorted(
stacks[0]['Parameters'], key=lambda k: k['ParameterKey'])
response['DescribeStacksResponse']['DescribeStacksResult'] = (
{'Stacks': stacks})
self.assertEqual(expected, response)
def test_describe_arn(self):
# Format a dummy GET request to pass into the WSGI handler
stack_name = u"wordpress"
stack_identifier = identifier.HeatIdentifier('t', stack_name, '6')
identity = dict(stack_identifier)
params = {'Action': 'DescribeStacks',
'StackName': stack_identifier.arn()}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'DescribeStacks')
# Stub out the RPC call to the engine with a pre-canned response
# Note the engine returns a load of keys we don't actually use
# so this is a subset of the real response format
engine_resp = [{u'stack_identity': {u'tenant': u't',
u'stack_name': u'wordpress',
u'stack_id': u'6',
u'path': u''},
u'updated_time': u'2012-07-09T09:13:11Z',
u'parameters': {u'DBUsername': u'admin',
u'LinuxDistribution': u'F17',
u'InstanceType': u'm1.large',
u'DBRootPassword': u'admin',
u'DBPassword': u'admin',
u'DBName': u'wordpress'},
u'outputs':
[{u'output_key': u'WebsiteURL',
u'description': u'URL for Wordpress wiki',
u'output_value': u'http://10.0.0.8/wordpress'}],
u'stack_status_reason': u'Stack successfully created',
u'creation_time': u'2012-07-09T09:12:45Z',
u'stack_name': u'wordpress',
u'notification_topics': [],
u'stack_action': u'CREATE',
u'stack_status': u'COMPLETE',
u'description': u'blah',
u'disable_rollback': 'true',
                        u'timeout_mins': 60,
                        u'capabilities': []}]
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
dummy_req.context,
('show_stack', {'stack_identity': identity,
'resolve_outputs': True}),
version='1.20'
).AndReturn(engine_resp)
self.m.ReplayAll()
# Call the list controller function and compare the response
response = self.controller.describe(dummy_req)
expected = {'DescribeStacksResponse':
{'DescribeStacksResult':
{'Stacks':
[{'StackId': u'arn:openstack:heat::t:stacks/wordpress/6',
'StackStatusReason': u'Stack successfully created',
'Description': u'blah',
'Parameters':
[{'ParameterValue': u'wordpress',
'ParameterKey': u'DBName'},
{'ParameterValue': u'admin',
'ParameterKey': u'DBPassword'},
{'ParameterValue': u'admin',
'ParameterKey': u'DBRootPassword'},
{'ParameterValue': u'admin',
'ParameterKey': u'DBUsername'},
{'ParameterValue': u'm1.large',
'ParameterKey': u'InstanceType'},
{'ParameterValue': u'F17',
'ParameterKey': u'LinuxDistribution'}],
'Outputs':
[{'OutputKey': u'WebsiteURL',
'OutputValue': u'http://10.0.0.8/wordpress',
'Description': u'URL for Wordpress wiki'}],
'TimeoutInMinutes': 60,
'CreationTime': u'2012-07-09T09:12:45Z',
'Capabilities': [],
'StackName': u'wordpress',
'NotificationARNs': [],
'StackStatus': u'CREATE_COMPLETE',
'DisableRollback': 'true',
'LastUpdatedTime': u'2012-07-09T09:13:11Z'}]}}}
stacks = (response['DescribeStacksResponse']['DescribeStacksResult']
['Stacks'])
stacks[0]['Parameters'] = sorted(
stacks[0]['Parameters'], key=lambda k: k['ParameterKey'])
response['DescribeStacksResponse']['DescribeStacksResult'] = (
{'Stacks': stacks})
self.assertEqual(expected, response)
def test_describe_arn_invalidtenant(self):
# Format a dummy GET request to pass into the WSGI handler
stack_name = u"wordpress"
stack_identifier = identifier.HeatIdentifier('wibble', stack_name, '6')
identity = dict(stack_identifier)
params = {'Action': 'DescribeStacks',
'StackName': stack_identifier.arn()}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'DescribeStacks')
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
dummy_req.context, ('show_stack', {'stack_identity': identity,
'resolve_outputs': True},),
version='1.20'
).AndRaise(heat_exception.InvalidTenant(target='test',
actual='test'))
self.m.ReplayAll()
result = self.controller.describe(dummy_req)
self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
def test_describe_aterr(self):
stack_name = "wordpress"
identity = dict(identifier.HeatIdentifier('t', stack_name, '6'))
params = {'Action': 'DescribeStacks', 'StackName': stack_name}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'DescribeStacks')
# Insert an engine RPC error and ensure we map correctly to the
# heat exception type
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
dummy_req.context, ('identify_stack', {'stack_name': stack_name})
).AndReturn(identity)
rpc_client.EngineClient.call(
dummy_req.context, ('show_stack', {'stack_identity': identity,
'resolve_outputs': True}),
version='1.20'
).AndRaise(AttributeError())
self.m.ReplayAll()
result = self.controller.describe(dummy_req)
self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
def test_describe_bad_name(self):
stack_name = "wibble"
params = {'Action': 'DescribeStacks', 'StackName': stack_name}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'DescribeStacks')
# Insert an engine RPC error and ensure we map correctly to the
# heat exception type
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
dummy_req.context, ('identify_stack', {'stack_name': stack_name})
).AndRaise(heat_exception.EntityNotFound(entity='Stack', name='test'))
self.m.ReplayAll()
result = self.controller.describe(dummy_req)
self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
def test_get_template_int_body(self):
"""Test the internal _get_template function."""
params = {'TemplateBody': "abcdef"}
dummy_req = self._dummy_GET_request(params)
result = self.controller._get_template(dummy_req)
expected = "abcdef"
self.assertEqual(expected, result)
# TODO(shardy) : test the _get_template TemplateUrl case
def _stub_rpc_create_stack_call_failure(self, req_context, stack_name,
engine_parms, engine_args,
failure, need_stub=True):
if need_stub:
mock_enforce = self.patchobject(policy.Enforcer, 'enforce')
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
mock_enforce.return_value = True
# Insert an engine RPC error and ensure we map correctly to the
# heat exception type
rpc_client.EngineClient.call(
req_context,
('create_stack',
{'stack_name': stack_name,
'template': self.template,
'params': engine_parms,
'files': {},
'environment_files': None,
'args': engine_args,
'owner_id': None,
'nested_depth': 0,
'user_creds_id': None,
'parent_resource_name': None,
'stack_user_project_id': None,
'template_id': None}),
version='1.29'
).AndRaise(failure)
def _stub_rpc_create_stack_call_success(self, stack_name, engine_parms,
engine_args, parameters):
dummy_req = self._dummy_GET_request(parameters)
self._stub_enforce(dummy_req, 'CreateStack')
# Stub out the RPC call to the engine with a pre-canned response
engine_resp = {u'tenant': u't',
u'stack_name': u'wordpress',
u'stack_id': u'1',
u'path': u''}
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
dummy_req.context,
('create_stack',
{'stack_name': stack_name,
'template': self.template,
'params': engine_parms,
'files': {},
'environment_files': None,
'args': engine_args,
'owner_id': None,
'nested_depth': 0,
'user_creds_id': None,
'parent_resource_name': None,
'stack_user_project_id': None,
'template_id': None}),
version='1.29'
).AndReturn(engine_resp)
self.m.ReplayAll()
return dummy_req
def test_create(self):
# Format a dummy request
stack_name = "wordpress"
json_template = json.dumps(self.template)
params = {'Action': 'CreateStack', 'StackName': stack_name,
'TemplateBody': '%s' % json_template,
'TimeoutInMinutes': 30,
'DisableRollback': 'true',
'Parameters.member.1.ParameterKey': 'InstanceType',
'Parameters.member.1.ParameterValue': 'm1.xlarge'}
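        # The AWS query API flattens list parameters into numbered
        # 'Parameters.member.N.*' keys; the controller reassembles them into
        # the plain dict stubbed as engine_parms below.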
engine_parms = {u'InstanceType': u'm1.xlarge'}
engine_args = {'timeout_mins': u'30', 'disable_rollback': 'true'}
dummy_req = self._stub_rpc_create_stack_call_success(stack_name,
engine_parms,
engine_args,
params)
response = self.controller.create(dummy_req)
expected = {
'CreateStackResponse': {
'CreateStackResult': {
u'StackId': u'arn:openstack:heat::t:stacks/wordpress/1'
}
}
}
self.assertEqual(expected, response)
def test_create_rollback(self):
# Format a dummy request
stack_name = "wordpress"
json_template = json.dumps(self.template)
params = {'Action': 'CreateStack', 'StackName': stack_name,
'TemplateBody': '%s' % json_template,
'TimeoutInMinutes': 30,
'DisableRollback': 'false',
'Parameters.member.1.ParameterKey': 'InstanceType',
'Parameters.member.1.ParameterValue': 'm1.xlarge'}
engine_parms = {u'InstanceType': u'm1.xlarge'}
engine_args = {'timeout_mins': u'30', 'disable_rollback': 'false'}
dummy_req = self._stub_rpc_create_stack_call_success(stack_name,
engine_parms,
engine_args,
params)
response = self.controller.create(dummy_req)
expected = {
'CreateStackResponse': {
'CreateStackResult': {
u'StackId': u'arn:openstack:heat::t:stacks/wordpress/1'
}
}
}
self.assertEqual(expected, response)
def test_create_onfailure_true(self):
# Format a dummy request
stack_name = "wordpress"
json_template = json.dumps(self.template)
params = {'Action': 'CreateStack', 'StackName': stack_name,
'TemplateBody': '%s' % json_template,
'TimeoutInMinutes': 30,
'OnFailure': 'DO_NOTHING',
'Parameters.member.1.ParameterKey': 'InstanceType',
'Parameters.member.1.ParameterValue': 'm1.xlarge'}
engine_parms = {u'InstanceType': u'm1.xlarge'}
engine_args = {'timeout_mins': u'30', 'disable_rollback': 'true'}
dummy_req = self._stub_rpc_create_stack_call_success(stack_name,
engine_parms,
engine_args,
params)
response = self.controller.create(dummy_req)
expected = {
'CreateStackResponse': {
'CreateStackResult': {
u'StackId': u'arn:openstack:heat::t:stacks/wordpress/1'
}
}
}
self.assertEqual(expected, response)
def test_create_onfailure_false_delete(self):
# Format a dummy request
stack_name = "wordpress"
json_template = json.dumps(self.template)
params = {'Action': 'CreateStack', 'StackName': stack_name,
'TemplateBody': '%s' % json_template,
'TimeoutInMinutes': 30,
'OnFailure': 'DELETE',
'Parameters.member.1.ParameterKey': 'InstanceType',
'Parameters.member.1.ParameterValue': 'm1.xlarge'}
engine_parms = {u'InstanceType': u'm1.xlarge'}
engine_args = {'timeout_mins': u'30', 'disable_rollback': 'false'}
dummy_req = self._stub_rpc_create_stack_call_success(stack_name,
engine_parms,
engine_args,
params)
response = self.controller.create(dummy_req)
expected = {
'CreateStackResponse': {
'CreateStackResult': {
u'StackId': u'arn:openstack:heat::t:stacks/wordpress/1'
}
}
}
self.assertEqual(expected, response)
def test_create_onfailure_false_rollback(self):
# Format a dummy request
stack_name = "wordpress"
json_template = json.dumps(self.template)
params = {'Action': 'CreateStack', 'StackName': stack_name,
'TemplateBody': '%s' % json_template,
'TimeoutInMinutes': 30,
'OnFailure': 'ROLLBACK',
'Parameters.member.1.ParameterKey': 'InstanceType',
'Parameters.member.1.ParameterValue': 'm1.xlarge'}
engine_parms = {u'InstanceType': u'm1.xlarge'}
engine_args = {'timeout_mins': u'30', 'disable_rollback': 'false'}
dummy_req = self._stub_rpc_create_stack_call_success(stack_name,
engine_parms,
engine_args,
params)
response = self.controller.create(dummy_req)
expected = {
'CreateStackResponse': {
'CreateStackResult': {
u'StackId': u'arn:openstack:heat::t:stacks/wordpress/1'
}
}
}
self.assertEqual(expected, response)
def test_create_onfailure_err(self):
# Format a dummy request
stack_name = "wordpress"
json_template = json.dumps(self.template)
params = {'Action': 'CreateStack', 'StackName': stack_name,
'TemplateBody': '%s' % json_template,
'TimeoutInMinutes': 30,
'DisableRollback': 'true',
'OnFailure': 'DO_NOTHING',
'Parameters.member.1.ParameterKey': 'InstanceType',
'Parameters.member.1.ParameterValue': 'm1.xlarge'}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'CreateStack')
self.assertRaises(exception.HeatInvalidParameterCombinationError,
self.controller.create, dummy_req)
def test_create_err_no_template(self):
# Format a dummy request with a missing template field
stack_name = "wordpress"
params = {'Action': 'CreateStack', 'StackName': stack_name}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'CreateStack')
result = self.controller.create(dummy_req)
self.assertIsInstance(result, exception.HeatMissingParameterError)
def test_create_err_inval_template(self):
# Format a dummy request with an invalid TemplateBody
stack_name = "wordpress"
json_template = "!$%**_+}@~?"
params = {'Action': 'CreateStack', 'StackName': stack_name,
'TemplateBody': '%s' % json_template}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'CreateStack')
result = self.controller.create(dummy_req)
self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
def test_create_err_rpcerr(self):
# Format a dummy request
stack_name = "wordpress"
json_template = json.dumps(self.template)
params = {'Action': 'CreateStack', 'StackName': stack_name,
'TemplateBody': '%s' % json_template,
'TimeoutInMinutes': 30,
'Parameters.member.1.ParameterKey': 'InstanceType',
'Parameters.member.1.ParameterValue': 'm1.xlarge'}
engine_parms = {u'InstanceType': u'm1.xlarge'}
engine_args = {'timeout_mins': u'30'}
dummy_req = self._dummy_GET_request(params)
self._stub_rpc_create_stack_call_failure(dummy_req.context,
stack_name,
engine_parms,
engine_args,
AttributeError())
failure = heat_exception.UnknownUserParameter(key='test')
self._stub_rpc_create_stack_call_failure(dummy_req.context,
stack_name,
engine_parms,
engine_args,
failure,
False)
failure = heat_exception.UserParameterMissing(key='test')
self._stub_rpc_create_stack_call_failure(dummy_req.context,
stack_name,
engine_parms,
engine_args,
failure,
False)
self.m.ReplayAll()
result = self.controller.create(dummy_req)
self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
result = self.controller.create(dummy_req)
self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
result = self.controller.create(dummy_req)
self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
def test_create_err_exists(self):
# Format a dummy request
stack_name = "wordpress"
json_template = json.dumps(self.template)
params = {'Action': 'CreateStack', 'StackName': stack_name,
'TemplateBody': '%s' % json_template,
'TimeoutInMinutes': 30,
'Parameters.member.1.ParameterKey': 'InstanceType',
'Parameters.member.1.ParameterValue': 'm1.xlarge'}
engine_parms = {u'InstanceType': u'm1.xlarge'}
engine_args = {'timeout_mins': u'30'}
failure = heat_exception.StackExists(stack_name='test')
dummy_req = self._dummy_GET_request(params)
self._stub_rpc_create_stack_call_failure(dummy_req.context,
stack_name,
engine_parms,
engine_args,
failure)
self.m.ReplayAll()
result = self.controller.create(dummy_req)
self.assertIsInstance(result, exception.AlreadyExistsError)
def test_create_err_engine(self):
# Format a dummy request
stack_name = "wordpress"
json_template = json.dumps(self.template)
params = {'Action': 'CreateStack', 'StackName': stack_name,
'TemplateBody': '%s' % json_template,
'TimeoutInMinutes': 30,
'Parameters.member.1.ParameterKey': 'InstanceType',
'Parameters.member.1.ParameterValue': 'm1.xlarge'}
engine_parms = {u'InstanceType': u'm1.xlarge'}
engine_args = {'timeout_mins': u'30'}
failure = heat_exception.StackValidationFailed(
message='Something went wrong')
dummy_req = self._dummy_GET_request(params)
self._stub_rpc_create_stack_call_failure(dummy_req.context,
stack_name,
engine_parms,
engine_args,
failure)
self.m.ReplayAll()
result = self.controller.create(dummy_req)
self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
def test_update(self):
# Format a dummy request
stack_name = "wordpress"
json_template = json.dumps(self.template)
params = {'Action': 'UpdateStack', 'StackName': stack_name,
'TemplateBody': '%s' % json_template,
'Parameters.member.1.ParameterKey': 'InstanceType',
'Parameters.member.1.ParameterValue': 'm1.xlarge'}
engine_parms = {u'InstanceType': u'm1.xlarge'}
engine_args = {}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'UpdateStack')
# Stub out the RPC call to the engine with a pre-canned response
identity = dict(identifier.HeatIdentifier('t', stack_name, '1'))
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
dummy_req.context,
('identify_stack', {'stack_name': stack_name})
).AndReturn(identity)
rpc_client.EngineClient.call(
dummy_req.context,
('update_stack',
{'stack_identity': identity,
'template': self.template,
'params': engine_parms,
'files': {},
'environment_files': None,
'args': engine_args,
'template_id': None}),
version='1.29'
).AndReturn(identity)
self.m.ReplayAll()
response = self.controller.update(dummy_req)
expected = {
'UpdateStackResponse': {
'UpdateStackResult': {
u'StackId': u'arn:openstack:heat::t:stacks/wordpress/1'
}
}
}
self.assertEqual(expected, response)
def test_cancel_update(self):
# Format a dummy request
stack_name = "wordpress"
params = {'Action': 'CancelUpdateStack', 'StackName': stack_name}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'CancelUpdateStack')
# Stub out the RPC call to the engine with a pre-canned response
identity = dict(identifier.HeatIdentifier('t', stack_name, '1'))
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
dummy_req.context,
('identify_stack', {'stack_name': stack_name})
).AndReturn(identity)
rpc_client.EngineClient.call(
dummy_req.context,
('stack_cancel_update',
{'stack_identity': identity,
'cancel_with_rollback': True}),
version='1.14'
).AndReturn(identity)
self.m.ReplayAll()
response = self.controller.cancel_update(dummy_req)
expected = {
'CancelUpdateStackResponse': {
'CancelUpdateStackResult': {}
}
}
self.assertEqual(response, expected)
def test_update_bad_name(self):
stack_name = "wibble"
json_template = json.dumps(self.template)
params = {'Action': 'UpdateStack', 'StackName': stack_name,
'TemplateBody': '%s' % json_template,
'Parameters.member.1.ParameterKey': 'InstanceType',
'Parameters.member.1.ParameterValue': 'm1.xlarge'}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'UpdateStack')
# Insert an engine RPC error and ensure we map correctly to the
# heat exception type
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
dummy_req.context,
('identify_stack', {'stack_name': stack_name})
).AndRaise(heat_exception.EntityNotFound(entity='Stack', name='test'))
self.m.ReplayAll()
result = self.controller.update(dummy_req)
self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
def test_create_or_update_err(self):
result = self.controller.create_or_update(req={}, action="dsdgfdf")
self.assertIsInstance(result, exception.HeatInternalFailureError)
def test_get_template(self):
# Format a dummy request
stack_name = "wordpress"
identity = dict(identifier.HeatIdentifier('t', stack_name, '6'))
params = {'Action': 'GetTemplate', 'StackName': stack_name}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'GetTemplate')
# Stub out the RPC call to the engine with a pre-canned response
engine_resp = self.template
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
dummy_req.context,
('identify_stack', {'stack_name': stack_name})
).AndReturn(identity)
rpc_client.EngineClient.call(
dummy_req.context,
('get_template', {'stack_identity': identity})
).AndReturn(engine_resp)
self.m.ReplayAll()
response = self.controller.get_template(dummy_req)
expected = {'GetTemplateResponse':
{'GetTemplateResult':
{'TemplateBody': self.template}}}
self.assertEqual(expected, response)
def test_get_template_err_rpcerr(self):
stack_name = "wordpress"
identity = dict(identifier.HeatIdentifier('t', stack_name, '6'))
params = {'Action': 'GetTemplate', 'StackName': stack_name}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'GetTemplate')
# Insert an engine RPC error and ensure we map correctly to the
# heat exception type
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
dummy_req.context, ('identify_stack', {'stack_name': stack_name})
).AndReturn(identity)
rpc_client.EngineClient.call(
dummy_req.context, ('get_template', {'stack_identity': identity})
).AndRaise(AttributeError())
self.m.ReplayAll()
result = self.controller.get_template(dummy_req)
self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
def test_get_template_bad_name(self):
stack_name = "wibble"
params = {'Action': 'GetTemplate', 'StackName': stack_name}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'GetTemplate')
# Insert an engine RPC error and ensure we map correctly to the
# heat exception type
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
dummy_req.context,
('identify_stack', {'stack_name': stack_name})
).AndRaise(heat_exception.EntityNotFound(entity='Stack', name='test'))
self.m.ReplayAll()
result = self.controller.get_template(dummy_req)
self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
def test_validate_err_no_template(self):
# Format a dummy request with a missing template field
params = {'Action': 'ValidateTemplate'}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'ValidateTemplate')
result = self.controller.validate_template(dummy_req)
self.assertIsInstance(result, exception.HeatMissingParameterError)
def test_validate_err_inval_template(self):
# Format a dummy request with an invalid TemplateBody
json_template = "!$%**_+}@~?"
params = {'Action': 'ValidateTemplate',
'TemplateBody': '%s' % json_template}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'ValidateTemplate')
result = self.controller.validate_template(dummy_req)
self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
def test_bad_resources_in_template(self):
# Format a dummy request
json_template = {
'AWSTemplateFormatVersion': '2010-09-09',
'Resources': {
'Type': 'AWS: : EC2: : Instance',
},
}
params = {'Action': 'ValidateTemplate',
'TemplateBody': '%s' % json.dumps(json_template)}
response = {'Error': 'Resources must contain Resource. '
'Found a [string] instead'}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'ValidateTemplate')
# Stub out the RPC call to the engine with a pre-canned response
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
dummy_req.context,
('validate_template', {'template': json_template, 'params': None,
'files': None, 'environment_files': None,
'show_nested': False,
'ignorable_errors': None}),
version='1.24'
).AndReturn(response)
self.m.ReplayAll()
response = self.controller.validate_template(dummy_req)
expected = {'ValidateTemplateResponse':
{'ValidateTemplateResult':
'Resources must contain Resource. '
'Found a [string] instead'}}
self.assertEqual(expected, response)
def test_delete(self):
# Format a dummy request
stack_name = "wordpress"
identity = dict(identifier.HeatIdentifier('t', stack_name, '1'))
params = {'Action': 'DeleteStack', 'StackName': stack_name}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'DeleteStack')
# Stub out the RPC call to the engine with a pre-canned response
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
dummy_req.context, ('identify_stack', {'stack_name': stack_name})
).AndReturn(identity)
# Engine returns None when delete successful
rpc_client.EngineClient.call(
dummy_req.context,
('delete_stack', {'stack_identity': identity})
).AndReturn(None)
self.m.ReplayAll()
response = self.controller.delete(dummy_req)
expected = {'DeleteStackResponse': {'DeleteStackResult': ''}}
self.assertEqual(expected, response)
def test_delete_err_rpcerr(self):
stack_name = "wordpress"
identity = dict(identifier.HeatIdentifier('t', stack_name, '1'))
params = {'Action': 'DeleteStack', 'StackName': stack_name}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'DeleteStack')
# Stub out the RPC call to the engine with a pre-canned response
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
dummy_req.context, ('identify_stack', {'stack_name': stack_name})
).AndReturn(identity)
# Insert an engine RPC error and ensure we map correctly to the
# heat exception type
rpc_client.EngineClient.call(
dummy_req.context, ('delete_stack', {'stack_identity': identity})
).AndRaise(AttributeError())
self.m.ReplayAll()
result = self.controller.delete(dummy_req)
self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
def test_delete_bad_name(self):
stack_name = "wibble"
params = {'Action': 'DeleteStack', 'StackName': stack_name}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'DeleteStack')
# Insert an engine RPC error and ensure we map correctly to the
# heat exception type
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
dummy_req.context, ('identify_stack', {'stack_name': stack_name})
).AndRaise(heat_exception.EntityNotFound(entity='Stack', name='test'))
self.m.ReplayAll()
result = self.controller.delete(dummy_req)
self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
def test_events_list_event_id_integer(self):
self._test_events_list('42')
def test_events_list_event_id_uuid(self):
self._test_events_list('a3455d8c-9f88-404d-a85b-5315293e67de')
def _test_events_list(self, event_id):
# Format a dummy request
stack_name = "wordpress"
identity = dict(identifier.HeatIdentifier('t', stack_name, '6'))
params = {'Action': 'DescribeStackEvents', 'StackName': stack_name}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'DescribeStackEvents')
# Stub out the RPC call to the engine with a pre-canned response
engine_resp = [{u'stack_name': u'wordpress',
u'event_time': u'2012-07-23T13:05:39Z',
u'stack_identity': {u'tenant': u't',
u'stack_name': u'wordpress',
u'stack_id': u'6',
u'path': u''},
u'resource_name': u'WikiDatabase',
u'resource_status_reason': u'state changed',
u'event_identity':
{u'tenant': u't',
u'stack_name': u'wordpress',
u'stack_id': u'6',
u'path': u'/resources/WikiDatabase/events/{0}'.format(
event_id)},
u'resource_action': u'TEST',
u'resource_status': u'IN_PROGRESS',
u'physical_resource_id': None,
u'resource_properties': {u'UserData': u'blah'},
u'resource_type': u'AWS::EC2::Instance'}]
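        # The expected EventId below matches the trailing segment of the
        # event_identity path above.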
kwargs = {'stack_identity': identity, 'nested_depth': None,
'limit': None, 'sort_keys': None, 'marker': None,
'sort_dir': None, 'filters': None}
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
dummy_req.context, ('identify_stack', {'stack_name': stack_name})
).AndReturn(identity)
rpc_client.EngineClient.call(
dummy_req.context,
('list_events', kwargs),
version='1.31'
).AndReturn(engine_resp)
self.m.ReplayAll()
response = self.controller.events_list(dummy_req)
expected = {'DescribeStackEventsResponse':
{'DescribeStackEventsResult':
{'StackEvents':
[{'EventId': six.text_type(event_id),
'StackId': u'arn:openstack:heat::t:stacks/wordpress/6',
'ResourceStatus': u'TEST_IN_PROGRESS',
'ResourceType': u'AWS::EC2::Instance',
'Timestamp': u'2012-07-23T13:05:39Z',
'StackName': u'wordpress',
'ResourceProperties':
json.dumps({u'UserData': u'blah'}),
'PhysicalResourceId': None,
'ResourceStatusReason': u'state changed',
'LogicalResourceId': u'WikiDatabase'}]}}}
self.assertEqual(expected, response)
def test_events_list_err_rpcerr(self):
stack_name = "wordpress"
identity = dict(identifier.HeatIdentifier('t', stack_name, '6'))
params = {'Action': 'DescribeStackEvents', 'StackName': stack_name}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'DescribeStackEvents')
# Insert an engine RPC error and ensure we map correctly to the
# heat exception type
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
dummy_req.context, ('identify_stack', {'stack_name': stack_name})
).AndReturn(identity)
rpc_client.EngineClient.call(
dummy_req.context,
('list_events', {'stack_identity': identity}),
version='1.31'
).AndRaise(Exception())
self.m.ReplayAll()
result = self.controller.events_list(dummy_req)
self.assertIsInstance(result, exception.HeatInternalFailureError)
def test_events_list_bad_name(self):
stack_name = "wibble"
params = {'Action': 'DescribeStackEvents', 'StackName': stack_name}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'DescribeStackEvents')
# Insert an engine RPC error and ensure we map correctly to the
# heat exception type
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
dummy_req.context, ('identify_stack', {'stack_name': stack_name})
).AndRaise(heat_exception.EntityNotFound(entity='Stack', name='test'))
self.m.ReplayAll()
result = self.controller.events_list(dummy_req)
self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
def test_describe_stack_resource(self):
# Format a dummy request
stack_name = "wordpress"
identity = dict(identifier.HeatIdentifier('t', stack_name, '6'))
params = {'Action': 'DescribeStackResource',
'StackName': stack_name,
'LogicalResourceId': "WikiDatabase"}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'DescribeStackResource')
# Stub out the RPC call to the engine with a pre-canned response
engine_resp = {u'description': u'',
u'resource_identity': {
u'tenant': u't',
u'stack_name': u'wordpress',
u'stack_id': u'6',
u'path': u'resources/WikiDatabase'
},
u'stack_name': u'wordpress',
u'resource_name': u'WikiDatabase',
u'resource_status_reason': None,
u'updated_time': u'2012-07-23T13:06:00Z',
u'stack_identity': {u'tenant': u't',
u'stack_name': u'wordpress',
u'stack_id': u'6',
u'path': u''},
u'resource_action': u'CREATE',
u'resource_status': u'COMPLETE',
u'physical_resource_id':
u'a3455d8c-9f88-404d-a85b-5315293e67de',
u'resource_type': u'AWS::EC2::Instance',
u'metadata': {u'wordpress': []}}
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
dummy_req.context, ('identify_stack', {'stack_name': stack_name})
).AndReturn(identity)
args = {
'stack_identity': identity,
'resource_name': dummy_req.params.get('LogicalResourceId'),
'with_attr': False,
}
rpc_client.EngineClient.call(
dummy_req.context, ('describe_stack_resource', args), version='1.2'
).AndReturn(engine_resp)
self.m.ReplayAll()
response = self.controller.describe_stack_resource(dummy_req)
expected = {'DescribeStackResourceResponse':
{'DescribeStackResourceResult':
{'StackResourceDetail':
{'StackId': u'arn:openstack:heat::t:stacks/wordpress/6',
'ResourceStatus': u'CREATE_COMPLETE',
'Description': u'',
'ResourceType': u'AWS::EC2::Instance',
'ResourceStatusReason': None,
'LastUpdatedTimestamp': u'2012-07-23T13:06:00Z',
'StackName': u'wordpress',
'PhysicalResourceId':
u'a3455d8c-9f88-404d-a85b-5315293e67de',
'Metadata': {u'wordpress': []},
'LogicalResourceId': u'WikiDatabase'}}}}
self.assertEqual(expected, response)
def test_describe_stack_resource_nonexistent_stack(self):
# Format a dummy request
stack_name = "wibble"
params = {'Action': 'DescribeStackResource',
'StackName': stack_name,
'LogicalResourceId': "WikiDatabase"}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'DescribeStackResource')
# Stub out the RPC call to the engine with a pre-canned response
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
dummy_req.context, ('identify_stack', {'stack_name': stack_name})
).AndRaise(heat_exception.EntityNotFound(entity='Stack', name='test'))
self.m.ReplayAll()
result = self.controller.describe_stack_resource(dummy_req)
self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
def test_describe_stack_resource_nonexistent(self):
# Format a dummy request
stack_name = "wordpress"
identity = dict(identifier.HeatIdentifier('t', stack_name, '6'))
params = {'Action': 'DescribeStackResource',
'StackName': stack_name,
'LogicalResourceId': "wibble"}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'DescribeStackResource')
# Stub out the RPC call to the engine with a pre-canned response
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
dummy_req.context, ('identify_stack', {'stack_name': stack_name})
).AndReturn(identity)
args = {
'stack_identity': identity,
'resource_name': dummy_req.params.get('LogicalResourceId'),
'with_attr': False,
}
rpc_client.EngineClient.call(
dummy_req.context, ('describe_stack_resource', args), version='1.2'
).AndRaise(heat_exception.ResourceNotFound(
resource_name='test', stack_name='test'))
self.m.ReplayAll()
result = self.controller.describe_stack_resource(dummy_req)
self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
def test_describe_stack_resources(self):
# Format a dummy request
stack_name = "wordpress"
identity = dict(identifier.HeatIdentifier('t', stack_name, '6'))
params = {'Action': 'DescribeStackResources',
'StackName': stack_name,
'LogicalResourceId': "WikiDatabase"}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'DescribeStackResources')
# Stub out the RPC call to the engine with a pre-canned response
engine_resp = [{u'description': u'',
u'resource_identity': {
u'tenant': u't',
u'stack_name': u'wordpress',
u'stack_id': u'6',
u'path': u'resources/WikiDatabase'
},
u'stack_name': u'wordpress',
u'resource_name': u'WikiDatabase',
u'resource_status_reason': None,
u'updated_time': u'2012-07-23T13:06:00Z',
u'stack_identity': {u'tenant': u't',
u'stack_name': u'wordpress',
u'stack_id': u'6',
u'path': u''},
u'resource_action': u'CREATE',
u'resource_status': u'COMPLETE',
u'physical_resource_id':
u'a3455d8c-9f88-404d-a85b-5315293e67de',
u'resource_type': u'AWS::EC2::Instance',
                        u'metadata': {u'ensureRunning': u'true'}}]
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
dummy_req.context, ('identify_stack', {'stack_name': stack_name})
).AndReturn(identity)
args = {
'stack_identity': identity,
'resource_name': dummy_req.params.get('LogicalResourceId'),
}
rpc_client.EngineClient.call(
dummy_req.context, ('describe_stack_resources', args)
).AndReturn(engine_resp)
self.m.ReplayAll()
response = self.controller.describe_stack_resources(dummy_req)
expected = {'DescribeStackResourcesResponse':
{'DescribeStackResourcesResult':
{'StackResources':
[{'StackId': u'arn:openstack:heat::t:stacks/wordpress/6',
'ResourceStatus': u'CREATE_COMPLETE',
'Description': u'',
'ResourceType': u'AWS::EC2::Instance',
'Timestamp': u'2012-07-23T13:06:00Z',
'ResourceStatusReason': None,
'StackName': u'wordpress',
'PhysicalResourceId':
u'a3455d8c-9f88-404d-a85b-5315293e67de',
'LogicalResourceId': u'WikiDatabase'}]}}}
self.assertEqual(expected, response)
def test_describe_stack_resources_bad_name(self):
stack_name = "wibble"
params = {'Action': 'DescribeStackResources',
'StackName': stack_name,
'LogicalResourceId': "WikiDatabase"}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'DescribeStackResources')
# Insert an engine RPC error and ensure we map correctly to the
# heat exception type
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
dummy_req.context, ('identify_stack', {'stack_name': stack_name})
).AndRaise(heat_exception.EntityNotFound(entity='Stack', name='test'))
self.m.ReplayAll()
result = self.controller.describe_stack_resources(dummy_req)
self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
def test_describe_stack_resources_physical(self):
# Format a dummy request
stack_name = "wordpress"
identity = dict(identifier.HeatIdentifier('t', stack_name, '6'))
params = {'Action': 'DescribeStackResources',
'LogicalResourceId': "WikiDatabase",
'PhysicalResourceId': 'a3455d8c-9f88-404d-a85b-5315293e67de'}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'DescribeStackResources')
# Stub out the RPC call to the engine with a pre-canned response
engine_resp = [{u'description': u'',
u'resource_identity': {
u'tenant': u't',
u'stack_name': u'wordpress',
u'stack_id': u'6',
u'path': u'resources/WikiDatabase'
},
u'stack_name': u'wordpress',
u'resource_name': u'WikiDatabase',
u'resource_status_reason': None,
u'updated_time': u'2012-07-23T13:06:00Z',
u'stack_identity': {u'tenant': u't',
u'stack_name': u'wordpress',
u'stack_id': u'6',
u'path': u''},
u'resource_action': u'CREATE',
u'resource_status': u'COMPLETE',
u'physical_resource_id':
u'a3455d8c-9f88-404d-a85b-5315293e67de',
u'resource_type': u'AWS::EC2::Instance',
                        u'metadata': {u'ensureRunning': u'true'}}]
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
dummy_req.context,
('find_physical_resource',
{'physical_resource_id': 'a3455d8c-9f88-404d-a85b-5315293e67de'})
).AndReturn(identity)
args = {
'stack_identity': identity,
'resource_name': dummy_req.params.get('LogicalResourceId'),
}
rpc_client.EngineClient.call(
dummy_req.context, ('describe_stack_resources', args)
).AndReturn(engine_resp)
self.m.ReplayAll()
response = self.controller.describe_stack_resources(dummy_req)
expected = {'DescribeStackResourcesResponse':
{'DescribeStackResourcesResult':
{'StackResources':
[{'StackId': u'arn:openstack:heat::t:stacks/wordpress/6',
'ResourceStatus': u'CREATE_COMPLETE',
'Description': u'',
'ResourceType': u'AWS::EC2::Instance',
'Timestamp': u'2012-07-23T13:06:00Z',
'ResourceStatusReason': None,
'StackName': u'wordpress',
'PhysicalResourceId':
u'a3455d8c-9f88-404d-a85b-5315293e67de',
'LogicalResourceId': u'WikiDatabase'}]}}}
self.assertEqual(expected, response)
def test_describe_stack_resources_physical_not_found(self):
# Format a dummy request
params = {'Action': 'DescribeStackResources',
'LogicalResourceId': "WikiDatabase",
'PhysicalResourceId': 'aaaaaaaa-9f88-404d-cccc-ffffffffffff'}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'DescribeStackResources')
# Stub out the RPC call to the engine with a pre-canned response
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
dummy_req.context,
('find_physical_resource',
{'physical_resource_id': 'aaaaaaaa-9f88-404d-cccc-ffffffffffff'})
).AndRaise(heat_exception.EntityNotFound(entity='Resource', name='1'))
self.m.ReplayAll()
response = self.controller.describe_stack_resources(dummy_req)
self.assertIsInstance(response,
exception.HeatInvalidParameterValueError)
def test_describe_stack_resources_err_inval(self):
# Format a dummy request containing both StackName and
# PhysicalResourceId, which is invalid and should throw a
# HeatInvalidParameterCombinationError
stack_name = "wordpress"
params = {'Action': 'DescribeStackResources',
'StackName': stack_name,
'PhysicalResourceId': "123456"}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'DescribeStackResources')
ret = self.controller.describe_stack_resources(dummy_req)
self.assertIsInstance(ret,
exception.HeatInvalidParameterCombinationError)
def test_list_stack_resources(self):
# Format a dummy request
stack_name = "wordpress"
identity = dict(identifier.HeatIdentifier('t', stack_name, '6'))
params = {'Action': 'ListStackResources',
'StackName': stack_name}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'ListStackResources')
# Stub out the RPC call to the engine with a pre-canned response
engine_resp = [{u'resource_identity':
{u'tenant': u't',
u'stack_name': u'wordpress',
u'stack_id': u'6',
u'path': u'/resources/WikiDatabase'},
u'stack_name': u'wordpress',
u'resource_name': u'WikiDatabase',
u'resource_status_reason': None,
u'updated_time': u'2012-07-23T13:06:00Z',
u'stack_identity': {u'tenant': u't',
u'stack_name': u'wordpress',
u'stack_id': u'6',
u'path': u''},
u'resource_action': u'CREATE',
u'resource_status': u'COMPLETE',
u'physical_resource_id':
u'a3455d8c-9f88-404d-a85b-5315293e67de',
u'resource_type': u'AWS::EC2::Instance'}]
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
dummy_req.context, ('identify_stack', {'stack_name': stack_name})
).AndReturn(identity)
rpc_client.EngineClient.call(
dummy_req.context,
('list_stack_resources', {'stack_identity': identity,
'nested_depth': 0,
'with_detail': False,
'filters': None}),
version='1.25'
).AndReturn(engine_resp)
self.m.ReplayAll()
response = self.controller.list_stack_resources(dummy_req)
expected = {'ListStackResourcesResponse': {'ListStackResourcesResult':
{'StackResourceSummaries':
[{'ResourceStatus': u'CREATE_COMPLETE',
'ResourceType': u'AWS::EC2::Instance',
'ResourceStatusReason': None,
'LastUpdatedTimestamp': u'2012-07-23T13:06:00Z',
'PhysicalResourceId':
u'a3455d8c-9f88-404d-a85b-5315293e67de',
'LogicalResourceId': u'WikiDatabase'}]}}}
self.assertEqual(expected, response)
def test_list_stack_resources_bad_name(self):
stack_name = "wibble"
params = {'Action': 'ListStackResources',
'StackName': stack_name}
dummy_req = self._dummy_GET_request(params)
self._stub_enforce(dummy_req, 'ListStackResources')
# Insert an engine RPC error and ensure we map correctly to the
# heat exception type
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
dummy_req.context, ('identify_stack', {'stack_name': stack_name})
).AndRaise(heat_exception.EntityNotFound(entity='Stack', name='test'))
self.m.ReplayAll()
result = self.controller.list_stack_resources(dummy_req)
self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
| 44.326642
| 79
| 0.558328
| 6,911
| 72,873
| 5.678773
| 0.06931
| 0.04138
| 0.042807
| 0.050961
| 0.865184
| 0.848061
| 0.822886
| 0.804592
| 0.793992
| 0.778016
| 0
| 0.017675
| 0.335405
| 72,873
| 1,643
| 80
| 44.353621
| 0.792674
| 0.066074
| 0
| 0.756736
| 0
| 0
| 0.223627
| 0.045974
| 0
| 0
| 0
| 0.000609
| 0.04465
| 1
| 0.046189
| false
| 0.006159
| 0.011547
| 0
| 0.060816
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e06bf941fc68525a19f89654017572d846e90c8b
| 15,818
|
py
|
Python
|
main.py
|
DRN2019/24-Solution-Calculator
|
ffa061aceefef8ea609dec5458292f568ab48aab
|
[
"MIT"
] | null | null | null |
main.py
|
DRN2019/24-Solution-Calculator
|
ffa061aceefef8ea609dec5458292f568ab48aab
|
[
"MIT"
] | null | null | null |
main.py
|
DRN2019/24-Solution-Calculator
|
ffa061aceefef8ea609dec5458292f568ab48aab
|
[
"MIT"
] | null | null | null |
"""
This program prints all the possible solutions in the game of 24, using cards as numbers
Programmed by: Darren Wu December 2020
"""
# Getting cards from user
card_string = input("Enter the 4 cards separated by spaces (e.g. 1 4 5 6): ")
# Splits the user input into a 4-item list of ints
cards = [int(i) for i in card_string.split() if i.isdigit()]
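# Note: tokens that are not plain digits are silently dropped, so malformed
# input can leave fewer than 4 cards in the list.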
# Function to add, subtract, multiply or divide two cards
# Returns an array with the two cards combined (the second card is removed)
def combine(array, index1, index2, function):
    temp = list(array)
    if function == 0:
        temp[index1] = temp[index1] + temp[index2]
    elif function == 1:
        temp[index1] = temp[index1] - temp[index2]
    elif function == 2:
        temp[index1] = temp[index1] * temp[index2]
    elif function == 3:
        temp[index1] = temp[index1] / temp[index2]
    # Drop the consumed card and return the reduced hand
    return [value for i, value in enumerate(temp) if i != index2]
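# Example: combine([1, 4, 5, 6], 0, 1, 0) returns [5, 5, 6], since index 0
# becomes 1 + 4 = 5 and the card at index 1 is removed.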
functions = ['+','-','*','/']
is24 = False
cards1 = []
cards2 = []
ans = []
correct = []
count = 0
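# Brute force: pick an ordered pair of remaining cards and an operator,
# reduce the hand by one card, repeat until one value is left, and test it
# against 24; the '== 0 and step == 3' guards skip division by zero.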
for startingCard in range(4):
for secondCard in range(0, len(cards)):
if(startingCard != secondCard):
if(startingCard < secondCard):
# All 4 math operators if starting card is to the left of the second card in the array
for firstStep in range(4):
if(not(cards[secondCard] == 0 and firstStep == 3)):
cards1 = combine(cards, startingCard, secondCard, firstStep)
for startingCard2 in range(3):
for secondCard2 in range(0, len(cards1)):
if(startingCard2 != secondCard2):
if(startingCard2 < secondCard2):
# All 4 math operators if starting card is to the left of the second card in the array
for secondStep in range(4):
if(not(cards1[secondCard2] == 0 and secondStep == 3)):
cards2 = combine(cards1, startingCard2, secondCard2, secondStep)
# All 4 math operators if starting card is to the left of the second card in the array
for thirdStep in range(4):
if(not(cards2[1] == 0 and thirdStep == 3)):
ans = combine(cards2, 0, 1, thirdStep)
# Used abs() instead of == in case answer is 23.999999
if (abs(ans[0] - 24) <= 0.01):
is24 =True
count += 1
if(not (cards in correct)):
correct.append(cards)
print('Solution Found!')
print('First Step: ' + str(cards[startingCard]) + functions[firstStep] + str(cards[secondCard]))
                                                                    print('Second Step: ' + str(cards1[startingCard2]) + functions[secondStep] + str(cards1[secondCard2]))
                                                                    print('Third Step: ' + str(cards2[0]) + ' ' + functions[thirdStep] + ' ' + str(cards2[1]))
                                                    # Subtracts and divides if starting card is to the right of second card
                                                    for thirdStep in range(1,4,2):
                                                        if(not(cards2[0] == 0 and thirdStep == 3)):
                                                            ans = combine(cards2, 1, 0, thirdStep)
                                                            if (abs(ans[0] - 24) <= 0.01):
                                                                is24 = True
                                                                count += 1
                                                                if(not (cards in correct)):
                                                                    correct.append(cards)
                                                                    print('Solution Found!')
                                                                    print('First Step: ' + str(cards[startingCard]) + functions[firstStep] + str(cards[secondCard]))
                                                                    print('Second Step: ' + str(cards1[startingCard2]) + functions[secondStep] + str(cards1[secondCard2]))
                                                                    print('Third Step: ' + str(cards2[1]) + ' ' + functions[thirdStep] + ' ' + str(cards2[0]))
                                        else:
                                            # Subtracts and divides if starting card is to the right of second card
                                            for secondStep in range(1,4,2):
                                                if(not(cards1[secondCard2] == 0 and secondStep == 3)):
                                                    cards2 = combine(cards1, startingCard2, secondCard2, secondStep)
                                                    # All 4 math operators if starting card is to the left of the second card in the array
                                                    for thirdStep in range(4):
                                                        if(not(cards2[1] == 0 and thirdStep == 3)):
                                                            ans = combine(cards2, 0, 1, thirdStep)
                                                            if (abs(ans[0] - 24) <= 0.01):
                                                                is24 = True
                                                                count += 1
                                                                if(not (cards in correct)):
                                                                    correct.append(cards)
                                                                    print('Solution Found!')
                                                                    print('First Step: ' + str(cards[startingCard]) + functions[firstStep] + str(cards[secondCard]))
                                                                    print('Second Step: ' + str(cards1[startingCard2]) + functions[secondStep] + str(cards1[secondCard2]))
                                                                    print('Third Step: ' + str(cards2[0]) + ' ' + functions[thirdStep] + ' ' + str(cards2[1]))
                                                    # Subtracts and divides if starting card is to the right of second card
                                                    for thirdStep in range(1,4,2):
                                                        if(not(cards2[0] == 0 and thirdStep == 3)):
                                                            ans = combine(cards2, 1, 0, thirdStep)
                                                            if (abs(ans[0] - 24) <= 0.01):
                                                                is24 = True
                                                                count += 1
                                                                if(not (cards in correct)):
                                                                    correct.append(cards)
                                                                    print('Solution Found!')
                                                                    print('First Step: ' + str(cards[startingCard]) + functions[firstStep] + str(cards[secondCard]))
                                                                    print('Second Step: ' + str(cards1[startingCard2]) + functions[secondStep] + str(cards1[secondCard2]))
                                                                    print('Third Step: ' + str(cards2[1]) + ' ' + functions[thirdStep] + ' ' + str(cards2[0]))
            else:
                # Subtracts and divides if starting card is to the right of second card
                for firstStep in range(1,4,2):
                    if(not(cards[secondCard] == 0 and firstStep == 3)):
                        cards1 = combine(cards, startingCard, secondCard, firstStep)
                        for startingCard2 in range(3):
                            if(startingCard2 == 0):
                                for secondCard2 in range(0, len(cards1)):
                                    if(startingCard2 != secondCard2):
                                        if(startingCard2 < secondCard2):
                                            # All 4 math operators if starting card is to the left of the second card in the array
                                            for secondStep in range(4):
                                                if(not(cards1[secondCard2] == 0 and secondStep == 3)):
                                                    cards2 = combine(cards1, startingCard2, secondCard2, secondStep)
                                                    # All 4 math operators if starting card is to the left of the second card in the array
                                                    for thirdStep in range(4):
                                                        if(not(cards2[1] == 0 and thirdStep == 3)):
                                                            ans = combine(cards2, 0, 1, thirdStep)
                                                            if (abs(ans[0] - 24) <= 0.01):
                                                                is24 = True
                                                                count += 1
                                                                if(not (cards in correct)):
                                                                    correct.append(cards)
                                                                    print('Solution Found!')
                                                                    print('First Step: ' + str(cards[startingCard]) + functions[firstStep] + str(cards[secondCard]))
                                                                    print('Second Step: ' + str(cards1[startingCard2]) + functions[secondStep] + str(cards1[secondCard2]))
                                                                    print('Third Step: ' + str(cards2[0]) + ' ' + functions[thirdStep] + ' ' + str(cards2[1]))
                                                    # Subtracts and divides if starting card is to the right of second card
                                                    for thirdStep in range(1,4,2):
                                                        if(not(cards2[0] == 0 and thirdStep == 3)):
                                                            ans = combine(cards2, 1, 0, thirdStep)
                                                            if (abs(ans[0] - 24) <= 0.01):
                                                                is24 = True
                                                                count += 1
                                                                if(not (cards in correct)):
                                                                    correct.append(cards)
                                                                    print('Solution Found!')
                                                                    print('First Step: ' + str(cards[startingCard]) + functions[firstStep] + str(cards[secondCard]))
                                                                    print('Second Step: ' + str(cards1[startingCard2]) + functions[secondStep] + str(cards1[secondCard2]))
                                                                    print('Third Step: ' + str(cards2[1]) + ' ' + functions[thirdStep] + ' ' + str(cards2[0]))
                                        else:
                                            # Subtracts and divides if starting card is to the right of second card
                                            for secondStep in range(1,4,2):
                                                if(not(cards1[secondCard2] == 0 and secondStep == 3)):
                                                    cards2 = combine(cards1, startingCard2, secondCard2, secondStep)
                                                    # All 4 math operators if starting card is to the left of the second card in the array
                                                    for thirdStep in range(4):
                                                        if(not(cards2[1] == 0 and thirdStep == 3)):
                                                            ans = combine(cards2, 0, 1, thirdStep)
                                                            if (abs(ans[0] - 24) <= 0.01):
                                                                is24 = True
                                                                count += 1
                                                                if(not (cards in correct)):
                                                                    correct.append(cards)
                                                                    print('Solution Found!')
                                                                    print('First Step: ' + str(cards[startingCard]) + functions[firstStep] + str(cards[secondCard]))
                                                                    print('Second Step: ' + str(cards1[startingCard2]) + functions[secondStep] + str(cards1[secondCard2]))
                                                                    print('Third Step: ' + str(cards2[0]) + ' ' + functions[thirdStep] + ' ' + str(cards2[1]))
                                                    # Subtracts and divides if starting card is to the right of second card
                                                    for thirdStep in range(1,4,2):
                                                        if(not(cards2[0] == 0 and thirdStep == 3)):
                                                            ans = combine(cards2, 1, 0, thirdStep)
                                                            if (abs(ans[0] - 24) <= 0.01):
                                                                is24 = True
                                                                count += 1
                                                                if(not (cards in correct)):
                                                                    correct.append(cards)
                                                                    print('Solution Found!')
                                                                    print('First Step: ' + str(cards[startingCard]) + functions[firstStep] + str(cards[secondCard]))
                                                                    print('Second Step: ' + str(cards1[startingCard2]) + functions[secondStep] + str(cards1[secondCard2]))
                                                                    print('Third Step: ' + str(cards2[1]) + ' ' + functions[thirdStep] + ' ' + str(cards2[0]))
if(is24):
    print('In total I found ' + str(count) + ' solutions!')
else:
    print('No solutions found.')
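The solver above enumerates card positions and operators by hand, one nesting level per choice, retrying subtraction and division with the operands reversed. For comparison, here is a minimal sketch of the same left-to-right brute force using itertools; it assumes plain numbers as input rather than the combine()/functions helpers defined earlier, and it only covers the running-result-on-the-left orderings (the loops above additionally try the reversed subtraction and division):

# Minimal sketch (assumption: plain numeric cards, no combine()/functions helpers).
import itertools
import operator

OPS = {'+': operator.add, '-': operator.sub, '*': operator.mul, '/': operator.truediv}

def solve_24_left_to_right(cards):
    """Return one left-to-right expression over a permutation of `cards`
    that evaluates to 24 within tolerance, or None if none exists."""
    for perm in itertools.permutations(cards):
        for ops in itertools.product(OPS, repeat=len(cards) - 1):
            acc, expr = perm[0], str(perm[0])
            try:
                for op, operand in zip(ops, perm[1:]):
                    acc = OPS[op](acc, operand)
                    expr += ' %s %s' % (op, operand)
            except ZeroDivisionError:
                continue  # skip divisions by zero, as the hand-written loops do
            if abs(acc - 24) <= 0.01:
                return expr
    return None

print(solve_24_left_to_right([1, 2, 3, 4]))  # prints e.g. '1 * 2 * 3 * 4'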
| 79.487437
| 167
| 0.349412
| 1,190
| 15,818
| 4.642857
| 0.097479
| 0.030407
| 0.035475
| 0.040543
| 0.864796
| 0.863348
| 0.863348
| 0.861538
| 0.861538
| 0.853575
| 0
| 0.049543
| 0.571248
| 15,818
| 199
| 168
| 79.487437
| 0.765114
| 0.090783
| 0
| 0.792683
| 0
| 0
| 0.037856
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006098
| false
| 0
| 0
| 0
| 0.012195
| 0.207317
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
0eb67f14a099c147f962af5382c33be4d3f9e9fa
| 171
|
py
|
Python
|
life/emails/backend/synthesize.py
|
sehgalvibhor/my_story
|
ebcef515fa32318b5bf0c7f166cd723a48f9f30b
|
[
"MIT"
] | 1
|
2016-04-13T18:33:27.000Z
|
2016-04-13T18:33:27.000Z
|
life/emails/backend/synthesize.py
|
sehgalvibhor/my_story
|
ebcef515fa32318b5bf0c7f166cd723a48f9f30b
|
[
"MIT"
] | null | null | null |
life/emails/backend/synthesize.py
|
sehgalvibhor/my_story
|
ebcef515fa32318b5bf0c7f166cd723a48f9f30b
|
[
"MIT"
] | null | null | null |
import sys
import sqlite3


class Process:
    # Placeholder accessors for the synthesized email categories; the
    # bodies are not implemented yet (sys and sqlite3 are imported but unused).
    def get_work(self):
        pass

    def get_play(self):
        pass

    def get_social(self):
        pass

    def sent_work(self):
        pass

    def sent_play(self):
        pass
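The stubs above import sqlite3 but never touch it. A hedged sketch of how one such accessor might eventually be filled in is shown below; the database path, table, and column names are assumptions for illustration, not anything defined by the file:

# Hypothetical completion of one accessor (assumed schema: emails(category, body)).
import sqlite3

def get_category(db_path, category):
    """Fetch the bodies of all stored emails tagged with `category`."""
    conn = sqlite3.connect(db_path)
    try:
        rows = conn.execute(
            "SELECT body FROM emails WHERE category = ?", (category,)
        ).fetchall()
        return [body for (body,) in rows]
    finally:
        conn.close()

# e.g. get_work(self) could delegate to get_category('emails.db', 'work')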
| 8.55
| 18
| 0.660819
| 26
| 171
| 4.153846
| 0.461538
| 0.259259
| 0.203704
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007752
| 0.245614
| 171
| 19
| 19
| 9
| 0.829457
| 0
| 0
| 0.384615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.384615
| true
| 0.384615
| 0.153846
| 0
| 0.615385
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
0ec2ea666b34785037585bc730de6fd124a4d2d5
| 123
|
py
|
Python
|
enthought/plugins/ipython_shell/view/ipython_shell_view.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 3
|
2016-12-09T06:05:18.000Z
|
2018-03-01T13:00:29.000Z
|
enthought/plugins/ipython_shell/view/ipython_shell_view.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 1
|
2020-12-02T00:51:32.000Z
|
2020-12-02T08:48:55.000Z
|
enthought/plugins/ipython_shell/view/ipython_shell_view.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | null | null | null |
# proxy module
from __future__ import absolute_import
from envisage.plugins.ipython_shell.view.ipython_shell_view import *
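These three lines are the whole file: a compatibility shim that keeps the old enthought.* import path importable after the code moved into the envisage package. A generic sketch of the pattern (module names here are illustrative, not from the repo):

# old_pkg/widget.py -- legacy path kept alive as a one-line proxy
from __future__ import absolute_import
from new_pkg.widget import *  # noqa: F401,F403 -- re-export the moved names

# Callers written against the old path keep working unchanged:
#   from old_pkg.widget import SomeView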
| 30.75
| 68
| 0.861789
| 17
| 123
| 5.764706
| 0.647059
| 0.244898
| 0.326531
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089431
| 123
| 3
| 69
| 41
| 0.875
| 0.097561
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
0ed0333791a310dbe38dcc3fc28e929daf0201aa
| 4,398
|
py
|
Python
|
tests/end_to_end/scenarios/mam_with_timestamps.py
|
norayr/biboumi
|
805671032d25ee6ce09ed75e8a385c04e9563cdd
|
[
"Zlib"
] | 68
|
2015-01-29T21:07:37.000Z
|
2022-03-20T14:48:07.000Z
|
tests/end_to_end/scenarios/mam_with_timestamps.py
|
norayr/biboumi
|
805671032d25ee6ce09ed75e8a385c04e9563cdd
|
[
"Zlib"
] | 5
|
2016-10-24T18:34:30.000Z
|
2021-08-31T13:30:37.000Z
|
tests/end_to_end/scenarios/mam_with_timestamps.py
|
norayr/biboumi
|
805671032d25ee6ce09ed75e8a385c04e9563cdd
|
[
"Zlib"
] | 13
|
2015-12-11T15:19:05.000Z
|
2021-08-31T13:24:35.000Z
|
from scenarios import *
scenario = (
scenarios.simple_channel_join.scenario,
# Send two channel messages
send_stanza("<message from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}' type='groupchat'><body>coucou</body></message>"),
expect_stanza("/message[@from='#foo%{irc_server_one}/{nick_one}'][@to='{jid_one}/{resource_one}'][@type='groupchat']/body[text()='coucou']",
"/message/stable_id:stanza-id[@by='#foo%{irc_server_one}'][@id]"),
send_stanza("<message from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}' type='groupchat'><body>coucou 2</body></message>"),
# Record the current time
expect_stanza("/message[@from='#foo%{irc_server_one}/{nick_one}'][@to='{jid_one}/{resource_one}'][@type='groupchat']/body[text()='coucou 2']",
after = save_current_timestamp_plus_delta("first_timestamp", datetime.timedelta(seconds=1))),
# Wait two seconds before sending two new messages
sleep_for(2),
send_stanza("<message from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}' type='groupchat'><body>coucou 3</body></message>"),
send_stanza("<message from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}' type='groupchat'><body>coucou 4</body></message>"),
expect_stanza("/message[@type='groupchat']/body[text()='coucou 3']"),
expect_stanza("/message[@type='groupchat']/body[text()='coucou 4']",
after = save_current_timestamp_plus_delta("second_timestamp", datetime.timedelta(seconds=1))),
# Retrieve the archive, after our saved datetime
send_stanza("""<iq to='#foo%{irc_server_one}' from='{jid_one}/{resource_one}' type='set' id='id8'>
<query xmlns='urn:xmpp:mam:2' queryid='qid16'>
<x type='submit' xmlns='jabber:x:data'>
<field var='FORM_TYPE' xmlns='jabber:x:data'><value xmlns='jabber:x:data'>urn:xmpp:mam:2</value></field>
<field var='start' xmlns='jabber:x:data'><value xmlns='jabber:x:data'>{first_timestamp}</value></field>
<field var='end' xmlns='jabber:x:data'><value xmlns='jabber:x:data'>{second_timestamp}</value></field>
</x>
</query>
</iq>"""),
expect_stanza("/message/mam:result[@queryid='qid16']/forward:forwarded/delay:delay",
"/message/mam:result/forward:forwarded/client:message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/client:body[text()='coucou 3']"),
expect_stanza("/message/mam:result[@queryid='qid16']/forward:forwarded/delay:delay",
"/message/mam:result/forward:forwarded/client:message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/client:body[text()='coucou 4']"),
expect_stanza("/iq[@type='result'][@id='id8'][@from='#foo%{irc_server_one}'][@to='{jid_one}/{resource_one}']",
"/iq/mam:fin[@complete='true']/rsm:set"),
# Try the same thing, but only with the 'start' value, omitting the end
send_stanza("""<iq from='{jid_one}/{resource_one}' id='id888' to='#foo%{irc_server_one}' type='set'>
<query queryid='qid17' xmlns='urn:xmpp:mam:2'>
<x type='submit' xmlns='jabber:x:data'>
<field type='hidden' var='FORM_TYPE' xmlns='jabber:x:data'><value xmlns='jabber:x:data'>urn:xmpp:mam:2</value></field>
<field var='start' xmlns='jabber:x:data'><value xmlns='jabber:x:data'>{first_timestamp}</value></field>
</x>
</query>
</iq>"""),
expect_stanza("/message/mam:result[@queryid='qid17']/forward:forwarded/delay:delay",
"/message/mam:result/forward:forwarded/client:message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/client:body[text()='coucou 3']"),
expect_stanza("/message/mam:result[@queryid='qid17']/forward:forwarded/delay:delay",
"/message/mam:result/forward:forwarded/client:message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/client:body[text()='coucou 4']"),
expect_stanza("/iq[@type='result'][@id='id888'][@from='#foo%{irc_server_one}'][@to='{jid_one}/{resource_one}']",
"/iq/mam:fin[@complete='true']/rsm:set"),
)
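The two archive queries in this scenario are plain XEP-0313 (MAM) data forms, differing only in whether an 'end' bound is present. A minimal sketch of assembling such an iq with the standard library, with element and field names taken from the stanzas above and the ids/timestamps purely illustrative:

# Minimal sketch: build a MAM 'start'/'end' range query with xml.etree.
import xml.etree.ElementTree as ET

def mam_range_query(iq_id, query_id, start, end=None):
    iq = ET.Element('iq', {'type': 'set', 'id': iq_id})
    query = ET.SubElement(iq, 'query', {'xmlns': 'urn:xmpp:mam:2', 'queryid': query_id})
    x = ET.SubElement(query, 'x', {'xmlns': 'jabber:x:data', 'type': 'submit'})
    fields = [('FORM_TYPE', 'urn:xmpp:mam:2'), ('start', start)]
    if end is not None:
        fields.append(('end', end))  # omit 'end' for an open-ended query, as in the second stanza
    for var, value in fields:
        field = ET.SubElement(x, 'field', {'var': var})
        ET.SubElement(field, 'value').text = value
    return ET.tostring(iq, encoding='unicode')

print(mam_range_query('id8', 'qid16', '2020-01-01T00:00:00Z', '2020-01-01T00:00:10Z'))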
| 68.71875
| 166
| 0.608004
| 555
| 4,398
| 4.654054
| 0.172973
| 0.034843
| 0.069686
| 0.087108
| 0.831204
| 0.759582
| 0.725126
| 0.724739
| 0.664344
| 0.650019
| 0
| 0.010335
| 0.185994
| 4,398
| 63
| 167
| 69.809524
| 0.711173
| 0.048886
| 0
| 0.444444
| 0
| 0.422222
| 0.795786
| 0.589897
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.022222
| 0
| 0.022222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
1619d20de482b4c894da89b2712b5a49fb49e85a
| 16,929
|
py
|
Python
|
apportionment.py
|
sarahscheffler/dp-census
|
329797cdc5bbeebac23aa037e04023354bf09e9e
|
[
"BSD-2-Clause"
] | 2
|
2018-12-01T21:39:34.000Z
|
2018-12-01T21:40:16.000Z
|
apportionment.py
|
sarahscheffler/dp-census
|
329797cdc5bbeebac23aa037e04023354bf09e9e
|
[
"BSD-2-Clause"
] | null | null | null |
apportionment.py
|
sarahscheffler/dp-census
|
329797cdc5bbeebac23aa037e04023354bf09e9e
|
[
"BSD-2-Clause"
] | null | null | null |
'''
Functions that implement interesting apportionment functions
'''
import math

'''
Different ways of implementing Huntington Hill
'''

# Huntington Hill's method https://www.maa.org/press/periodicals/convergence/apportioning-representatives-in-the-united-states-congress-hills-method-of-apportionment
def iter_D_huntington_hill(populations, number_of_seats, ignore=dict()):
    '''
    populations: maps state code (two letters) to population of that state (number).
    number_of_seats: number.
    ignore: dict, if a state is in the dict, and is mapped to None, then it is ignored.
    Returns: a map from state codes to seats allocated to that state. (oneshot method, modifies divisor D until it works)
    '''
    total_population = sum([0 if (population is None or (state in ignore and ignore[state] is None)) else population for state, population in populations.items()])
    D = float(total_population) / float(number_of_seats)

    # A single iteration of huntington_hill, computes for a given ratio D
    # and returns resulting quotas and total number of apportioned seats
    def iter(D):
        quotas = dict()
        total = 0
        for state, population in populations.items():
            if population is None or (state in ignore and ignore[state] is None):
                quotas[state] = None
                continue
            quota = population / D
            flr = math.floor(quota)
            cel = math.ceil(quota)
            # round according to geometric mean
            geomean = math.sqrt(flr * cel)
            apportionment = cel if quota > geomean else flr
            total += apportionment
            quotas[state] = int(apportionment)
        return (total, quotas)

    gone_up = False
    gone_down = False
    while not gone_up or not gone_down:
        total, quotas = iter(D)
        if total == number_of_seats:
            return quotas
        # else: go up/down until we find new D
        elif total < number_of_seats:
            gone_down = True
            D = D - 1
        else:
            gone_up = True
            D = D + 1
    # if we were "bouncing" then binary search to find new D
    lower_D = D - 1
    higher_D = D + 1
    while True:
        total, quotas = iter(D)
        if total == number_of_seats:
            return quotas
        elif total < number_of_seats:
            higher_D = D
            D = (D + lower_D) / 2.0
        else:
            lower_D = D
            D = (D + higher_D) / 2.0


def iter_seats_huntington_hill(populations, number_of_seats, ignore=dict()):
    '''
    populations: maps state code (two letters) to population of that state (number).
    number_of_seats: number.
    ignore: dict, if a state is in the dict, and is mapped to None, then it is ignored.
    Returns: a map from state codes to seats allocated to that state (iterating over seats method)
    '''
    import heapq
    # total population
    total_population = sum([0 if (population is None or (state in ignore and ignore[state] is None)) else population for state, population in populations.items()])
    # nobody has any seats yet
    apportionments = { state: (None if population is None or (state in ignore and ignore[state] is None) else 0) for state, population in populations.items() }
    # priority queue via min heap
    priorities = [ (float('-inf'), state) for state, population in populations.items() if not (population is None or (state in ignore and ignore[state] is None)) ]
    heapq.heapify(priorities)  # min heap: priorities are negated to make it a max heap!
    for i in range(number_of_seats):
        _, state = heapq.heappop(priorities)
        apportionments[state] += 1
        new_priority = populations[state] / math.sqrt(apportionments[state] * (apportionments[state]+1))
        heapq.heappush(priorities, (-1 * new_priority, state))
    return apportionments


def huntington_hill(populations, number_of_seats, ignore=dict()):
    '''
    populations: maps state code (two letters) to population of that state (number).
    number_of_seats: number.
    ignore: dict, if a state is in the dict, and is mapped to None, then it is ignored.
    Returns: a map from state codes to seats allocated to that state.
    '''
    # iter is faster than oneshot
    return iter_seats_huntington_hill(populations, number_of_seats, ignore)


'''
Webster
'''

# Webster's method https://www.maa.org/press/periodicals/convergence/apportioning-representatives-in-the-united-states-congress-websters-method-of-apportionment
def webster(populations, number_of_seats, ignore=dict()):
    '''
    populations: maps state code (two letters) to population of that state (number).
    number_of_seats: number.
    ignore: dict, if a state is in the dict, and is mapped to None, then it is ignored.
    Returns: a map from state codes to seats allocated to that state.
    '''
    total_population = sum([0 if (population is None or (state in ignore and ignore[state] is None)) else population for state, population in populations.items()])
    D = total_population / float(number_of_seats)

    def iter(D):
        quotas = dict()
        total = 0
        for state, population in populations.items():
            if population is None or (state in ignore and ignore[state] is None):
                quotas[state] = None
                continue
            quota = population / D
            apportionment = round(quota)
            total += apportionment
            quotas[state] = int(apportionment)
        return (total, quotas)

    gone_up = False
    gone_down = False
    while not gone_up or not gone_down:
        total, quotas = iter(D)
        if total == number_of_seats:
            return quotas
        # else: go up/down until we find new D
        elif total < number_of_seats:
            gone_down = True
            D = D - 1
        else:
            gone_up = True
            D = D + 1
    # if we were "bouncing" then binary search to find new D
    lower_D = D - 1
    higher_D = D + 1
    while True:
        total, quotas = iter(D)
        if total == number_of_seats:
            return quotas
        elif total < number_of_seats:
            higher_D = D
            D = (D + lower_D) / 2
        else:
            lower_D = D
            D = (D + higher_D) / 2


'''
Dean
'''

# Dean's method https://www.maa.org/press/periodicals/convergence/apportioning-representatives-in-the-united-states-congress-deans-method-of-apportionment
def dean(populations, number_of_seats, ignore=dict()):
    '''
    populations: maps state code (two letters) to population of that state (number).
    number_of_seats: number.
    ignore: dict, if a state is in the dict, and is mapped to None, then it is ignored.
    Returns: a map from state codes to seats allocated to that state.
    '''
    total_population = sum([0 if (population is None or (state in ignore and ignore[state] is None)) else population for state, population in populations.items()])
    D = total_population / float(number_of_seats)

    def iter(D):
        quotas = dict()
        total = 0
        for state, population in populations.items():
            if population is None or (state in ignore and ignore[state] is None):
                quotas[state] = None
                continue
            quota = population / D
            flr = math.floor(quota)
            cel = math.ceil(quota)
            # round according to harmonic mean
            harmonicmean = (flr*(cel))/(flr+0.5)
            apportionment = cel if quota > harmonicmean else flr
            total += apportionment
            quotas[state] = int(apportionment)
        return (total, quotas)

    gone_up = False
    gone_down = False
    while not gone_up or not gone_down:
        total, quotas = iter(D)
        if total == number_of_seats:
            return quotas
        # else: go up/down until we find new D
        elif total < number_of_seats:
            gone_down = True
            D = D - 1
        else:
            gone_up = True
            D = D + 1
    # if we were "bouncing" then binary search to find new D
    lower_D = D - 1
    higher_D = D + 1
    while True:
        total, quotas = iter(D)
        if total == number_of_seats:
            return quotas
        elif total < number_of_seats:
            higher_D = D
            D = (D + lower_D) / 2
        else:
            lower_D = D
            D = (D + higher_D) / 2


'''
Hamilton
'''

# Hamilton's method https://www.maa.org/press/periodicals/convergence/apportioning-representatives-in-the-united-states-congress-hamiltons-method-of-apportionment
def hamilton(populations, number_of_seats, ignore=dict()):
    '''
    populations: maps state code (two letters) to population of that state (number).
    number_of_seats: number.
    ignore: dict, if a state is in the dict, and is mapped to None, then it is ignored.
    Returns: a map from state codes to seats allocated to that state.
    '''
    total_population = sum([0 if (population is None or (state in ignore and ignore[state] is None)) else population for state, population in populations.items()])
    D = total_population / float(number_of_seats)

    def iter(D):
        quotas = dict()
        remainders = []
        total = 0
        for state, population in populations.items():
            if population is None or (state in ignore and ignore[state] is None):
                quotas[state] = None
                continue
            # first assign seats based on a rounded down state quota
            # and keep the remainder
            quota = population / D
            apportionment = math.floor(quota)
            remainder = quota - apportionment
            total += apportionment
            quotas[state] = int(apportionment)
            remainders.append((remainder, state))
        return (total, quotas, remainders)

    total, quotas, remainders = iter(D)
    print(total)
    print("ns: ", number_of_seats)
    # if there is no surplus of seats
    # then we are done
    if total == number_of_seats:
        return quotas
    # assign the surplus of seats to the states
    # with the largest remainders
    elif total < number_of_seats:
        surplus = number_of_seats - total
        remainders.sort(reverse=True)
        for r, s in remainders:
            if(surplus > 0 and r > 0):
                if(r > 0):
                    quotas[s] += 1
                    surplus -= 1
            else:
                return quotas


'''
lowndes
'''

# Lowndes's method https://www.maa.org/press/periodicals/convergence/apportioning-representatives-in-the-united-states-congress-lowndes-method-of-apportionment
def lowndes(populations, number_of_seats, ignore=dict()):
    '''
    populations: maps state code (two letters) to population of that state (number).
    number_of_seats: number.
    ignore: dict, if a state is in the dict, and is mapped to None, then it is ignored.
    Returns: a map from state codes to seats allocated to that state.
    '''
    total_population = sum([0 if (population is None or (state in ignore and ignore[state] is None)) else population for state, population in populations.items()])
    D = total_population / float(number_of_seats)

    def iter(D):
        quotas = dict()
        remainders = []
        total = 0
        for state, population in populations.items():
            if population is None or (state in ignore and ignore[state] is None):
                quotas[state] = None
                continue
            # first assign seats based on a rounded down state quota
            # and keep the relative fraction part:
            # fractional part of quota divided by floor(quota)
            quota = population / D
            apportionment = math.floor(quota)
            apportionment = apportionment if apportionment > 0 else 1
            remainder = (quota - apportionment) / apportionment
            total += apportionment
            quotas[state] = int(apportionment)
            remainders.append((remainder, state))
        return (total, quotas, remainders)

    total, quotas, remainders = iter(D)
    # if there is no surplus of seats
    # then we are done
    if total == number_of_seats:
        return quotas
    # assign the surplus of seats to the states
    # with the largest remainders
    elif total < number_of_seats:
        surplus = number_of_seats - total
        remainders.sort(reverse=True)
        for r, s in remainders:
            if(surplus > 0 and r > 0):
                if(r > 0):
                    quotas[s] += 1
                    surplus -= 1
            else:
                return quotas


'''
Jefferson
'''

# Jefferson's method https://www.maa.org/press/periodicals/convergence/apportioning-representatives-in-the-united-states-congress-jeffersons-method-of-apportionment
def jefferson(populations, number_of_seats, ignore=dict()):
    '''
    populations: maps state code (two letters) to population of that state (number).
    number_of_seats: number.
    ignore: dict, if a state is in the dict, and is mapped to None, then it is ignored.
    Returns: a map from state codes to seats allocated to that state.
    '''
    total_population = sum([0 if (population is None or (state in ignore and ignore[state] is None)) else population for state, population in populations.items()])
    D = total_population / float(number_of_seats)
    d = 0

    # A single iteration of jefferson, computes for a given ratio D and a given divisor adjustment d
    # and returns resulting quotas and total number of apportioned seats
    def iter(D, d):
        quotas = dict()
        total = 0
        for state, population in populations.items():
            if population is None or (state in ignore and ignore[state] is None):
                quotas[state] = None
                continue
            quota = population / (D - d)
            apportionment = math.floor(quota)
            total += apportionment
            quotas[state] = int(apportionment)
        return (total, quotas)

    while True:
        total, quotas = iter(D, d)
        if total == number_of_seats:
            return quotas
        elif total < number_of_seats:
            d = d + 1  # shrink the divisor (D - d) so the floors hand out more seats
        else:
            d = d - 1  # grow the divisor so the floors hand out fewer seats


'''
Inverted Jefferson
'''

# Adam's method https://www.maa.org/press/periodicals/convergence/apportioning-representatives-in-the-united-states-congress-adams-method-of-apportionment
# "Inverted Jefferson"
def adam(populations, number_of_seats, ignore=dict()):
    '''
    populations: maps state code (two letters) to population of that state (number).
    number_of_seats: number.
    ignore: dict, if a state is in the dict, and is mapped to None, then it is ignored.
    Returns: a map from state codes to seats allocated to that state.
    '''
    total_population = sum([0 if (population is None or (state in ignore and ignore[state] is None)) else population for state, population in populations.items()])
    D = total_population / float(number_of_seats)
    d = 0

    # A single iteration of adam, computes for a given ratio D and a given divisor adjustment d
    # and returns resulting quotas and total number of apportioned seats
    def iter(D, d):
        quotas = dict()
        total = 0
        for state, population in populations.items():
            if population is None or (state in ignore and ignore[state] is None):
                quotas[state] = None
                continue
            quota = population / (D + d)
            apportionment = math.ceil(quota)  # Adams rounds every quota up (Jefferson inverted)
            total += apportionment
            quotas[state] = int(apportionment)
        return (total, quotas)

    while True:
        total, quotas = iter(D, d)
        if total == number_of_seats:
            return quotas
        elif total < number_of_seats:
            d = d - 1
        else:
            d = d + 1


if __name__ == '__main__':
    from parse import *
    import sys

    years, total_population, state_population = parse_historical_populations()
    _, total_seats, state_seats = parse_historical_seats_apportioned()
    alg_name = 'huntington_hill'
    if len(sys.argv) > 1:
        alg_name = sys.argv[1]
    domain = years
    if len(sys.argv) > 2:
        domain = map(int, sys.argv[2:])
    for year in sorted(domain):
        if year in [1920, 2017]:
            continue
        actual_output = globals()[alg_name](state_population[year], total_seats[year], ignore=state_seats[year])
        if len(sys.argv) > 2:
            for key in sorted(actual_output.keys()):
                print(key, actual_output[key], state_seats[year][key], '' if actual_output[key] == state_seats[year][key] else '----------------')
        else:
            print(year, actual_output == state_seats[year])
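A quick way to exercise these functions without the historical parse module is a toy population table. The numbers below are made up, and the import assumes the file is importable as apportionment (matching this row's path, apportionment.py):

# Toy sanity check with made-up populations, not census data.
from apportionment import huntington_hill

toy = {'AA': 21878, 'BB': 9713, 'CC': 4167, 'DD': 3252, 'EE': 1065}
seats = huntington_hill(toy, 40)
print(seats)
print('total seats handed out:', sum(seats.values()))  # the heap loop always allocates exactly 40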
| 34.977273
| 164
| 0.61941
| 2,199
| 16,929
| 4.67849
| 0.09186
| 0.036742
| 0.06318
| 0.034992
| 0.826691
| 0.824164
| 0.820956
| 0.796948
| 0.796948
| 0.78888
| 0
| 0.006037
| 0.295528
| 16,929
| 483
| 165
| 35.049689
| 0.856616
| 0.302735
| 0
| 0.807407
| 0
| 0
| 0.004139
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059259
| false
| 0
| 0.014815
| 0
| 0.151852
| 0.014815
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
164f1e41cdc4305b26e7d5d8b72ffbb694772fc1
| 151,873
|
py
|
Python
|
test/vanilla/version-tolerant/Expected/AcceptanceTests/BodyArrayVersionTolerant/bodyarrayversiontolerant/operations/_operations.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | null | null | null |
test/vanilla/version-tolerant/Expected/AcceptanceTests/BodyArrayVersionTolerant/bodyarrayversiontolerant/operations/_operations.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | null | null | null |
test/vanilla/version-tolerant/Expected/AcceptanceTests/BodyArrayVersionTolerant/bodyarrayversiontolerant/operations/_operations.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
T = TypeVar("T")
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_array_get_null_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/null'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_invalid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/invalid'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_empty_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/empty'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_put_empty_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = '/array/empty'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_boolean_tfft_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/prim/boolean/tfft'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_put_boolean_tfft_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = '/array/prim/boolean/tfft'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_boolean_invalid_null_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/prim/boolean/true.null.false'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_boolean_invalid_string_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/prim/boolean/true.boolean.false'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_integer_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/prim/integer/1.-1.3.300'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_put_integer_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = '/array/prim/integer/1.-1.3.300'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_int_invalid_null_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/prim/integer/1.null.zero'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_int_invalid_string_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/prim/integer/1.integer.0'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_long_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/prim/long/1.-1.3.300'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_put_long_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = '/array/prim/long/1.-1.3.300'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_long_invalid_null_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/prim/long/1.null.zero'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_long_invalid_string_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/prim/long/1.integer.0'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_float_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/prim/float/0--0.01-1.2e20'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_put_float_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = '/array/prim/float/0--0.01-1.2e20'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_float_invalid_null_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/prim/float/0.0-null-1.2e20'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_float_invalid_string_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/prim/float/1.number.0'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_double_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/prim/double/0--0.01-1.2e20'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_put_double_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = '/array/prim/double/0--0.01-1.2e20'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_double_invalid_null_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/prim/double/0.0-null-1.2e20'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_double_invalid_string_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/prim/double/1.number.0'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_string_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/prim/string/foo1.foo2.foo3'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_put_string_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = '/array/prim/string/foo1.foo2.foo3'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_enum_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/prim/enum/foo1.foo2.foo3'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_put_enum_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = '/array/prim/enum/foo1.foo2.foo3'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_string_enum_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/prim/string-enum/foo1.foo2.foo3'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_put_string_enum_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = '/array/prim/string-enum/foo1.foo2.foo3'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_string_with_null_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/prim/string/foo.null.foo2'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_string_with_invalid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/prim/string/foo.123.foo2'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_uuid_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/prim/uuid/valid'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_put_uuid_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = '/array/prim/uuid/valid'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_uuid_invalid_chars_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/prim/uuid/invalidchars'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_date_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/prim/date/valid'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_put_date_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = '/array/prim/date/valid'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_date_invalid_null_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/prim/date/invalidnull'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_date_invalid_chars_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/prim/date/invalidchars'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_date_time_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/prim/date-time/valid'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_put_date_time_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = '/array/prim/date-time/valid'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_date_time_invalid_null_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/prim/date-time/invalidnull'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_date_time_invalid_chars_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/prim/date-time/invalidchars'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_date_time_rfc1123_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/prim/date-time-rfc1123/valid'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_put_date_time_rfc1123_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = '/array/prim/date-time-rfc1123/valid'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_duration_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/prim/duration/valid'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_put_duration_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = '/array/prim/duration/valid'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_byte_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/prim/byte/valid'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_put_byte_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = '/array/prim/byte/valid'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_byte_invalid_null_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/prim/byte/invalidnull'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_base64_url_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/prim/base64url/valid'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_complex_null_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/complex/null'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_complex_empty_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/complex/empty'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_complex_item_null_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/complex/itemnull'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_complex_item_empty_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/complex/itemempty'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_complex_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/complex/valid'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_put_complex_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = '/array/complex/valid'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_array_null_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/array/null'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_array_empty_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/array/empty'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_array_item_null_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/array/itemnull'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_array_item_empty_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/array/itemempty'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_array_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/array/valid'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_put_array_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = '/array/array/valid'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_dictionary_null_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/dictionary/null'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_dictionary_empty_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/dictionary/empty'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_dictionary_item_null_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/dictionary/itemnull'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_dictionary_item_empty_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/dictionary/itemempty'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_get_dictionary_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = '/array/dictionary/valid'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_array_put_dictionary_valid_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = '/array/dictionary/valid'
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
headers=header_parameters,
**kwargs
)
# fmt: on
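# Illustrative usage sketch (not part of the generated code): each builder
# above returns an azure.core HttpRequest that can be sent through any
# pipeline. Assuming `client` is a service client exposing the standard
# `send_request` method (the client name and wiring are assumptions, not
# shown in this file):
#
#     request = build_array_get_array_valid_request()
#     response = client.send_request(request)
#     response.raise_for_status()
#     nested = response.json()  # e.g. [["1", "2", "3"], ["4", "5", "6"]]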
class ArrayOperations(object):
"""ArrayOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get_null(
self, **kwargs # type: Any
):
# type: (...) -> List[int]
"""Get null array value.
:return: list of int
:rtype: list[int]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
0 # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[int]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_null_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_null.metadata = {"url": "/array/null"} # type: ignore
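    # Usage sketch (hypothetical; assumes the service client attaches this
    # operation group as `client.array`):
    #
    #     result = client.array.get_null()  # service responds with JSON `null`
    #     assert result is None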
@distributed_trace
def get_invalid(
self, **kwargs # type: Any
):
# type: (...) -> List[int]
"""Get invalid array [1, 2, 3.
:return: list of int
:rtype: list[int]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
0 # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[int]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_invalid_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_invalid.metadata = {"url": "/array/invalid"} # type: ignore
@distributed_trace
def get_empty(
self, **kwargs # type: Any
):
# type: (...) -> List[int]
"""Get empty array value [].
:return: list of int
:rtype: list[int]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
0 # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[int]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_empty_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_empty.metadata = {"url": "/array/empty"} # type: ignore
@distributed_trace
def put_empty(
self,
array_body, # type: List[str]
**kwargs # type: Any
):
# type: (...) -> None
"""Set array value empty [].
:param array_body:
:type array_body: list[str]
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
array_body = [
"str" # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
_json = array_body
request = build_array_put_empty_request(
content_type=content_type,
json=_json,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
put_empty.metadata = {"url": "/array/empty"} # type: ignore
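    # Usage sketch (hypothetical client/attribute names): PUT an empty list;
    # the call returns None when the service answers 200.
    #
    #     client.array.put_empty([])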
@distributed_trace
def get_boolean_tfft(
self, **kwargs # type: Any
):
# type: (...) -> List[bool]
"""Get boolean array value [true, false, false, true].
:return: list of bool
:rtype: list[bool]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
bool # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[bool]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_boolean_tfft_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_boolean_tfft.metadata = {"url": "/array/prim/boolean/tfft"} # type: ignore
@distributed_trace
def put_boolean_tfft(
self,
array_body, # type: List[bool]
**kwargs # type: Any
):
# type: (...) -> None
"""Set array value empty [true, false, false, true].
:param array_body:
:type array_body: list[bool]
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
array_body = [
bool # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
_json = array_body
request = build_array_put_boolean_tfft_request(
content_type=content_type,
json=_json,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
put_boolean_tfft.metadata = {"url": "/array/prim/boolean/tfft"} # type: ignore
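    # Usage sketch (hypothetical names): round-trip the boolean fixture.
    #
    #     client.array.put_boolean_tfft([True, False, False, True])
    #     assert client.array.get_boolean_tfft() == [True, False, False, True]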
@distributed_trace
def get_boolean_invalid_null(
self, **kwargs # type: Any
):
# type: (...) -> List[bool]
"""Get boolean array value [true, null, false].
:return: list of bool
:rtype: list[bool]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
bool # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[bool]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_boolean_invalid_null_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_boolean_invalid_null.metadata = {"url": "/array/prim/boolean/true.null.false"} # type: ignore
@distributed_trace
def get_boolean_invalid_string(
self, **kwargs # type: Any
):
# type: (...) -> List[bool]
"""Get boolean array value [true, 'boolean', false].
:return: list of bool
:rtype: list[bool]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
bool # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[bool]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_boolean_invalid_string_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_boolean_invalid_string.metadata = {"url": "/array/prim/boolean/true.boolean.false"} # type: ignore
@distributed_trace
def get_integer_valid(
self, **kwargs # type: Any
):
# type: (...) -> List[int]
"""Get integer array value [1, -1, 3, 300].
:return: list of int
:rtype: list[int]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
0 # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[int]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_integer_valid_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_integer_valid.metadata = {"url": "/array/prim/integer/1.-1.3.300"} # type: ignore
@distributed_trace
def put_integer_valid(
self,
array_body, # type: List[int]
**kwargs # type: Any
):
# type: (...) -> None
"""Set array value empty [1, -1, 3, 300].
:param array_body:
:type array_body: list[int]
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
array_body = [
0 # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
_json = array_body
request = build_array_put_integer_valid_request(
content_type=content_type,
json=_json,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
put_integer_valid.metadata = {"url": "/array/prim/integer/1.-1.3.300"} # type: ignore
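    # Usage sketch (hypothetical names): the body is sent as plain JSON, so a
    # Python list of ints matches the wire format directly.
    #
    #     client.array.put_integer_valid([1, -1, 3, 300])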
@distributed_trace
def get_int_invalid_null(
self, **kwargs # type: Any
):
# type: (...) -> List[int]
"""Get integer array value [1, null, 0].
:return: list of int
:rtype: list[int]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
0 # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[int]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_int_invalid_null_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_int_invalid_null.metadata = {"url": "/array/prim/integer/1.null.zero"} # type: ignore
@distributed_trace
def get_int_invalid_string(
self, **kwargs # type: Any
):
# type: (...) -> List[int]
"""Get integer array value [1, 'integer', 0].
:return: list of int
:rtype: list[int]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
0 # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[int]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_int_invalid_string_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_int_invalid_string.metadata = {"url": "/array/prim/integer/1.integer.0"} # type: ignore
@distributed_trace
def get_long_valid(
self, **kwargs # type: Any
):
# type: (...) -> List[int]
"""Get integer array value [1, -1, 3, 300].
:return: list of long
:rtype: list[long]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
0.0 # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[int]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_long_valid_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_long_valid.metadata = {"url": "/array/prim/long/1.-1.3.300"} # type: ignore
@distributed_trace
def put_long_valid(
self,
array_body, # type: List[int]
**kwargs # type: Any
):
# type: (...) -> None
"""Set array value empty [1, -1, 3, 300].
:param array_body:
:type array_body: list[long]
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
array_body = [
0.0 # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
_json = array_body
request = build_array_put_long_valid_request(
content_type=content_type,
json=_json,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
put_long_valid.metadata = {"url": "/array/prim/long/1.-1.3.300"} # type: ignore
@distributed_trace
def get_long_invalid_null(
self, **kwargs # type: Any
):
# type: (...) -> List[int]
"""Get long array value [1, null, 0].
:return: list of long
:rtype: list[long]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
0.0 # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[int]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_long_invalid_null_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_long_invalid_null.metadata = {"url": "/array/prim/long/1.null.zero"} # type: ignore
@distributed_trace
def get_long_invalid_string(
self, **kwargs # type: Any
):
# type: (...) -> List[int]
"""Get long array value [1, 'integer', 0].
:return: list of long
:rtype: list[long]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
0.0 # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[int]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_long_invalid_string_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_long_invalid_string.metadata = {"url": "/array/prim/long/1.integer.0"} # type: ignore
@distributed_trace
def get_float_valid(
self, **kwargs # type: Any
):
# type: (...) -> List[float]
"""Get float array value [0, -0.01, 1.2e20].
:return: list of float
:rtype: list[float]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
0.0 # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[float]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_float_valid_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_float_valid.metadata = {"url": "/array/prim/float/0--0.01-1.2e20"} # type: ignore
@distributed_trace
def put_float_valid(
self,
array_body, # type: List[float]
**kwargs # type: Any
):
# type: (...) -> None
"""Set array value [0, -0.01, 1.2e20].
:param array_body:
:type array_body: list[float]
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
array_body = [
0.0 # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
_json = array_body
request = build_array_put_float_valid_request(
content_type=content_type,
json=_json,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
put_float_valid.metadata = {"url": "/array/prim/float/0--0.01-1.2e20"} # type: ignore
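    # Usage sketch (hypothetical names): 1.2e20 is a plain Python float and
    # survives the JSON round-trip unchanged.
    #
    #     client.array.put_float_valid([0, -0.01, 1.2e20])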
@distributed_trace
def get_float_invalid_null(
self, **kwargs # type: Any
):
# type: (...) -> List[float]
"""Get float array value [0.0, null, -1.2e20].
:return: list of float
:rtype: list[float]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
0.0 # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[float]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_float_invalid_null_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_float_invalid_null.metadata = {"url": "/array/prim/float/0.0-null-1.2e20"} # type: ignore
@distributed_trace
def get_float_invalid_string(
self, **kwargs # type: Any
):
# type: (...) -> List[float]
"""Get boolean array value [1.0, 'number', 0.0].
:return: list of float
:rtype: list[float]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
0.0 # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[float]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_float_invalid_string_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_float_invalid_string.metadata = {"url": "/array/prim/float/1.number.0"} # type: ignore
@distributed_trace
def get_double_valid(
self, **kwargs # type: Any
):
# type: (...) -> List[float]
"""Get float array value [0, -0.01, 1.2e20].
:return: list of float
:rtype: list[float]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
0.0 # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[float]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_double_valid_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_double_valid.metadata = {"url": "/array/prim/double/0--0.01-1.2e20"} # type: ignore
@distributed_trace
def put_double_valid(
self,
array_body, # type: List[float]
**kwargs # type: Any
):
# type: (...) -> None
"""Set array value [0, -0.01, 1.2e20].
:param array_body:
:type array_body: list[float]
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
array_body = [
0.0 # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
_json = array_body
request = build_array_put_double_valid_request(
content_type=content_type,
json=_json,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
put_double_valid.metadata = {"url": "/array/prim/double/0--0.01-1.2e20"} # type: ignore
@distributed_trace
def get_double_invalid_null(
self, **kwargs # type: Any
):
# type: (...) -> List[float]
"""Get float array value [0.0, null, -1.2e20].
:return: list of float
:rtype: list[float]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
0.0 # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[float]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_double_invalid_null_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_double_invalid_null.metadata = {"url": "/array/prim/double/0.0-null-1.2e20"} # type: ignore
@distributed_trace
def get_double_invalid_string(
self, **kwargs # type: Any
):
# type: (...) -> List[float]
"""Get boolean array value [1.0, 'number', 0.0].
:return: list of float
:rtype: list[float]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
0.0 # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[float]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_double_invalid_string_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_double_invalid_string.metadata = {"url": "/array/prim/double/1.number.0"} # type: ignore
@distributed_trace
def get_string_valid(
self, **kwargs # type: Any
):
# type: (...) -> List[str]
"""Get string array value ['foo1', 'foo2', 'foo3'].
:return: list of str
:rtype: list[str]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
"str" # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[str]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_string_valid_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_string_valid.metadata = {"url": "/array/prim/string/foo1.foo2.foo3"} # type: ignore
@distributed_trace
def put_string_valid(
self,
array_body, # type: List[str]
**kwargs # type: Any
):
# type: (...) -> None
"""Set array value ['foo1', 'foo2', 'foo3'].
:param array_body:
:type array_body: list[str]
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
array_body = [
"str" # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
_json = array_body
request = build_array_put_string_valid_request(
content_type=content_type,
json=_json,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
put_string_valid.metadata = {"url": "/array/prim/string/foo1.foo2.foo3"} # type: ignore
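    # Usage sketch (hypothetical names):
    #
    #     client.array.put_string_valid(["foo1", "foo2", "foo3"])
    #     assert client.array.get_string_valid() == ["foo1", "foo2", "foo3"]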
@distributed_trace
def get_enum_valid(
self, **kwargs # type: Any
):
# type: (...) -> List[str]
"""Get enum array value ['foo1', 'foo2', 'foo3'].
:return: list of str. Possible values are: "foo1", "foo2", and "foo3".
:rtype: list[str]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
"str" # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[str]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_enum_valid_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_enum_valid.metadata = {"url": "/array/prim/enum/foo1.foo2.foo3"} # type: ignore
@distributed_trace
def put_enum_valid(
self,
array_body, # type: List[str]
**kwargs # type: Any
):
# type: (...) -> None
"""Set array value ['foo1', 'foo2', 'foo3'].
:param array_body:
:type array_body: list[str]
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
array_body = [
"str" # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
_json = array_body
request = build_array_put_enum_valid_request(
content_type=content_type,
json=_json,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
put_enum_valid.metadata = {"url": "/array/prim/enum/foo1.foo2.foo3"} # type: ignore
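    # Usage sketch (hypothetical names): this low-level layer performs no enum
    # validation, so the documented values ("foo1", "foo2", "foo3") are sent
    # as plain strings and anything else is rejected by the service.
    #
    #     client.array.put_enum_valid(["foo1", "foo2", "foo3"])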
@distributed_trace
def get_string_enum_valid(
self, **kwargs # type: Any
):
# type: (...) -> List[str]
"""Get enum array value ['foo1', 'foo2', 'foo3'].
:return: list of str. Possible values are: "foo1", "foo2", and "foo3".
:rtype: list[str]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
"str" # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[str]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_string_enum_valid_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_string_enum_valid.metadata = {"url": "/array/prim/string-enum/foo1.foo2.foo3"} # type: ignore
@distributed_trace
def put_string_enum_valid(
self,
array_body, # type: List[str]
**kwargs # type: Any
):
# type: (...) -> None
"""Set array value ['foo1', 'foo2', 'foo3'].
:param array_body:
:type array_body: list[str]
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
array_body = [
"str" # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
_json = array_body
request = build_array_put_string_enum_valid_request(
content_type=content_type,
json=_json,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
put_string_enum_valid.metadata = {"url": "/array/prim/string-enum/foo1.foo2.foo3"} # type: ignore
@distributed_trace
def get_string_with_null(
self, **kwargs # type: Any
):
# type: (...) -> List[str]
"""Get string array value ['foo', null, 'foo2'].
:return: list of str
:rtype: list[str]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
"str" # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[str]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_string_with_null_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_string_with_null.metadata = {"url": "/array/prim/string/foo.null.foo2"} # type: ignore
@distributed_trace
def get_string_with_invalid(
self, **kwargs # type: Any
):
# type: (...) -> List[str]
"""Get string array value ['foo', 123, 'foo2'].
:return: list of str
:rtype: list[str]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
"str" # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[str]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_string_with_invalid_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_string_with_invalid.metadata = {"url": "/array/prim/string/foo.123.foo2"} # type: ignore
@distributed_trace
def get_uuid_valid(
self, **kwargs # type: Any
):
# type: (...) -> List[str]
"""Get uuid array value ['6dcc7237-45fe-45c4-8a6b-3a8a3f625652',
'd1399005-30f7-40d6-8da6-dd7c89ad34db', 'f42f6aa1-a5bc-4ddf-907e-5f915de43205'].
:return: list of str
:rtype: list[str]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
str # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[str]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_uuid_valid_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_uuid_valid.metadata = {"url": "/array/prim/uuid/valid"} # type: ignore
@distributed_trace
def put_uuid_valid(
self,
array_body, # type: List[str]
**kwargs # type: Any
):
# type: (...) -> None
"""Set array value ['6dcc7237-45fe-45c4-8a6b-3a8a3f625652',
'd1399005-30f7-40d6-8da6-dd7c89ad34db', 'f42f6aa1-a5bc-4ddf-907e-5f915de43205'].
:param array_body:
:type array_body: list[str]
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
array_body = [
str # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
_json = array_body
request = build_array_put_uuid_valid_request(
content_type=content_type,
json=_json,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
put_uuid_valid.metadata = {"url": "/array/prim/uuid/valid"} # type: ignore
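    # Usage sketch (hypothetical names): the body is handed to the JSON
    # serializer unmodified, so supply UUIDs as strings; uuid.UUID instances
    # may not be JSON-serializable depending on the azure-core version.
    #
    #     client.array.put_uuid_valid([
    #         "6dcc7237-45fe-45c4-8a6b-3a8a3f625652",
    #         "d1399005-30f7-40d6-8da6-dd7c89ad34db",
    #         "f42f6aa1-a5bc-4ddf-907e-5f915de43205",
    #     ])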
@distributed_trace
def get_uuid_invalid_chars(
self, **kwargs # type: Any
):
# type: (...) -> List[str]
"""Get uuid array value ['6dcc7237-45fe-45c4-8a6b-3a8a3f625652', 'foo'].
:return: list of str
:rtype: list[str]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
str # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[str]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_uuid_invalid_chars_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_uuid_invalid_chars.metadata = {"url": "/array/prim/uuid/invalidchars"} # type: ignore
@distributed_trace
def get_date_valid(
self, **kwargs # type: Any
):
# type: (...) -> List[datetime.date]
"""Get integer array value ['2000-12-01', '1980-01-02', '1492-10-12'].
:return: list of date
:rtype: list[~datetime.date]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
"2020-02-20" # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[datetime.date]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_date_valid_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_date_valid.metadata = {"url": "/array/prim/date/valid"} # type: ignore
@distributed_trace
def put_date_valid(
self,
array_body, # type: List[datetime.date]
**kwargs # type: Any
):
# type: (...) -> None
"""Set array value ['2000-12-01', '1980-01-02', '1492-10-12'].
:param array_body:
:type array_body: list[~datetime.date]
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
array_body = [
"2020-02-20" # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
_json = array_body
request = build_array_put_date_valid_request(
content_type=content_type,
json=_json,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
put_date_valid.metadata = {"url": "/array/prim/date/valid"} # type: ignore
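    # Usage sketch (hypothetical names): because `_json = array_body` is passed
    # straight through as the JSON body, dates are typically supplied as
    # ISO-8601 strings, matching the docstring template above.
    #
    #     client.array.put_date_valid(["2000-12-01", "1980-01-02", "1492-10-12"])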
@distributed_trace
def get_date_invalid_null(
self, **kwargs # type: Any
):
# type: (...) -> List[datetime.date]
"""Get date array value ['2012-01-01', null, '1776-07-04'].
:return: list of date
:rtype: list[~datetime.date]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
"2020-02-20" # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[datetime.date]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_date_invalid_null_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_date_invalid_null.metadata = {"url": "/array/prim/date/invalidnull"} # type: ignore
@distributed_trace
def get_date_invalid_chars(
self, **kwargs # type: Any
):
# type: (...) -> List[datetime.date]
"""Get date array value ['2011-03-22', 'date'].
:return: list of date
:rtype: list[~datetime.date]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
"2020-02-20" # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[datetime.date]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_date_invalid_chars_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_date_invalid_chars.metadata = {"url": "/array/prim/date/invalidchars"} # type: ignore
@distributed_trace
def get_date_time_valid(
self, **kwargs # type: Any
):
# type: (...) -> List[datetime.datetime]
"""Get date-time array value ['2000-12-01t00:00:01z', '1980-01-02T00:11:35+01:00',
'1492-10-12T10:15:01-08:00'].
:return: list of datetime
:rtype: list[~datetime.datetime]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
"2020-02-20 00:00:00" # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[datetime.datetime]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_date_time_valid_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_date_time_valid.metadata = {"url": "/array/prim/date-time/valid"} # type: ignore
@distributed_trace
def put_date_time_valid(
self,
array_body, # type: List[datetime.datetime]
**kwargs # type: Any
):
# type: (...) -> None
"""Set array value ['2000-12-01t00:00:01z', '1980-01-02T00:11:35+01:00',
'1492-10-12T10:15:01-08:00'].
:param array_body:
:type array_body: list[~datetime.datetime]
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
array_body = [
"2020-02-20 00:00:00" # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
_json = array_body
request = build_array_put_date_time_valid_request(
content_type=content_type,
json=_json,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
put_date_time_valid.metadata = {"url": "/array/prim/date-time/valid"} # type: ignore
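    # Usage sketch (hypothetical names): date-times likewise go over the wire
    # as ISO-8601 strings with an explicit offset.
    #
    #     client.array.put_date_time_valid([
    #         "2000-12-01T00:00:01Z",
    #         "1980-01-02T00:11:35+01:00",
    #         "1492-10-12T10:15:01-08:00",
    #     ])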
@distributed_trace
def get_date_time_invalid_null(
self, **kwargs # type: Any
):
# type: (...) -> List[datetime.datetime]
"""Get date array value ['2000-12-01t00:00:01z', null].
:return: list of datetime
:rtype: list[~datetime.datetime]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
"2020-02-20 00:00:00" # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[datetime.datetime]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_date_time_invalid_null_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_date_time_invalid_null.metadata = {"url": "/array/prim/date-time/invalidnull"} # type: ignore
@distributed_trace
def get_date_time_invalid_chars(
self, **kwargs # type: Any
):
# type: (...) -> List[datetime.datetime]
"""Get date array value ['2000-12-01t00:00:01z', 'date-time'].
:return: list of datetime
:rtype: list[~datetime.datetime]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
"2020-02-20 00:00:00" # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[datetime.datetime]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_date_time_invalid_chars_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_date_time_invalid_chars.metadata = {"url": "/array/prim/date-time/invalidchars"} # type: ignore
@distributed_trace
def get_date_time_rfc1123_valid(
self, **kwargs # type: Any
):
# type: (...) -> List[datetime.datetime]
"""Get date-time array value ['Fri, 01 Dec 2000 00:00:01 GMT', 'Wed, 02 Jan 1980 00:11:35 GMT',
'Wed, 12 Oct 1492 10:15:01 GMT'].
:return: list of datetime
:rtype: list[~datetime.datetime]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
"2020-02-20 00:00:00" # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[datetime.datetime]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_date_time_rfc1123_valid_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_date_time_rfc1123_valid.metadata = {"url": "/array/prim/date-time-rfc1123/valid"} # type: ignore
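# --- Hedged usage sketch (not part of the generated client): response.json()
# yields raw RFC 1123 strings; a caller could parse them with the standard
# library. `items` below is illustrative sample data, not a live response ---
from email.utils import parsedate_to_datetime
items = ["Fri, 01 Dec 2000 00:00:01 GMT", "Wed, 02 Jan 1980 00:11:35 GMT"]
parsed = [parsedate_to_datetime(s) for s in items]  # tz-aware datetime objects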
@distributed_trace
def put_date_time_rfc1123_valid(
self,
array_body, # type: List[datetime.datetime]
**kwargs # type: Any
):
# type: (...) -> None
"""Set array value ['Fri, 01 Dec 2000 00:00:01 GMT', 'Wed, 02 Jan 1980 00:11:35 GMT', 'Wed, 12
Oct 1492 10:15:01 GMT'].
:param array_body:
:type array_body: list[~datetime.datetime]
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
array_body = [
"2020-02-20 00:00:00" # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
_json = array_body
request = build_array_put_date_time_rfc1123_valid_request(
content_type=content_type,
json=_json,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
put_date_time_rfc1123_valid.metadata = {"url": "/array/prim/date-time-rfc1123/valid"} # type: ignore
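# --- Hedged pre-serialization sketch (assumption: because `_json = array_body`
# is sent as-is, the caller converts datetimes to RFC 1123 strings) ---
import datetime
from email.utils import format_datetime
def to_rfc1123(values):
    # format_datetime(..., usegmt=True) emits e.g. 'Fri, 01 Dec 2000 00:00:01 GMT'
    return [format_datetime(v, usegmt=True) for v in values]
body = to_rfc1123([datetime.datetime(2000, 12, 1, 0, 0, 1, tzinfo=datetime.timezone.utc)])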
@distributed_trace
def get_duration_valid(
self, **kwargs # type: Any
):
# type: (...) -> List[datetime.timedelta]
"""Get duration array value ['P123DT22H14M12.011S', 'P5DT1H0M0S'].
:return: list of timedelta
:rtype: list[~datetime.timedelta]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
"1 day, 0:00:00" # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[datetime.timedelta]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_duration_valid_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_duration_valid.metadata = {"url": "/array/prim/duration/valid"} # type: ignore
@distributed_trace
def put_duration_valid(
self,
array_body, # type: List[datetime.timedelta]
**kwargs # type: Any
):
# type: (...) -> None
"""Set array value ['P123DT22H14M12.011S', 'P5DT1H0M0S'].
:param array_body:
:type array_body: list[~datetime.timedelta]
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
array_body = [
"1 day, 0:00:00" # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
_json = array_body
request = build_array_put_duration_valid_request(
content_type=content_type,
json=_json,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
put_duration_valid.metadata = {"url": "/array/prim/duration/valid"} # type: ignore
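# --- Hedged helper sketch: durations travel as ISO 8601 strings on the wire
# (e.g. 'P5DT1H0M0S'), so a caller would format timedeltas before sending;
# sub-second precision (as in 'P123DT22H14M12.011S') is omitted for brevity ---
import datetime
def to_iso8601_duration(td):
    # Decompose the timedelta into the 'P<d>DT<h>H<m>M<s>S' components
    total = int(td.total_seconds())
    days, rem = divmod(total, 86400)
    hours, rem = divmod(rem, 3600)
    minutes, seconds = divmod(rem, 60)
    return "P{}DT{}H{}M{}S".format(days, hours, minutes, seconds)
assert to_iso8601_duration(datetime.timedelta(days=5, hours=1)) == "P5DT1H0M0S"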
@distributed_trace
def get_byte_valid(
self, **kwargs # type: Any
):
# type: (...) -> List[bytearray]
"""Get byte array value [hex(FF FF FF FA), hex(01 02 03), hex (25, 29, 43)] with each item encoded
in base64.
:return: list of bytearray
:rtype: list[bytearray]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
bytearray("bytearray", encoding="utf-8") # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[bytearray]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_byte_valid_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_byte_valid.metadata = {"url": "/array/prim/byte/valid"} # type: ignore
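# --- Hedged caller-side sketch: items arrive base64-encoded strings, so decode
# each non-null entry. `raw` is illustrative sample data, not a live response ---
import base64
raw = ["AQID"]  # base64 of bytes 01 02 03
decoded = [base64.b64decode(s) if s is not None else None for s in raw]
assert decoded == [b"\x01\x02\x03"]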
@distributed_trace
def put_byte_valid(
self,
array_body, # type: List[bytearray]
**kwargs # type: Any
):
# type: (...) -> None
"""Put the array value [hex(FF FF FF FA), hex(01 02 03), hex (25, 29, 43)] with each
elementencoded in base 64.
:param array_body:
:type array_body: list[bytearray]
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
array_body = [
bytearray("bytearray", encoding="utf-8") # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
_json = array_body
request = build_array_put_byte_valid_request(
content_type=content_type,
json=_json,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
put_byte_valid.metadata = {"url": "/array/prim/byte/valid"} # type: ignore
@distributed_trace
def get_byte_invalid_null(
self, **kwargs # type: Any
):
# type: (...) -> List[bytearray]
"""Get byte array value [hex(AB, AC, AD), null] with the first item base64 encoded.
:return: list of bytearray
:rtype: list[bytearray]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
bytearray("bytearray", encoding="utf-8") # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[bytearray]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_byte_invalid_null_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_byte_invalid_null.metadata = {"url": "/array/prim/byte/invalidnull"} # type: ignore
@distributed_trace
def get_base64_url(
self, **kwargs # type: Any
):
# type: (...) -> List[bytes]
"""Get array value ['a string that gets encoded with base64url', 'test string' 'Lorem ipsum'] with
the items base64url encoded.
:return: list of bytes
:rtype: list[bytes]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
bytes("bytes", encoding="utf-8") # Optional.
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[bytes]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_base64_url_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_base64_url.metadata = {"url": "/array/prim/base64url/valid"} # type: ignore
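# --- Hedged decoding sketch: base64url payloads may omit '=' padding, so pad
# to a multiple of four before decoding (helper name is illustrative) ---
import base64
def b64url_decode(s):
    return base64.urlsafe_b64decode(s + "=" * (-len(s) % 4))
assert b64url_decode("dGVzdCBzdHJpbmc") == b"test string"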
@distributed_trace
def get_complex_null(
self, **kwargs # type: Any
):
# type: (...) -> List[JSONType]
"""Get array of complex type null value.
:return: list of JSON object
:rtype: list[JSONType]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
{
"integer": 0, # Optional.
"string": "str" # Optional.
}
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[JSONType]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_complex_null_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_complex_null.metadata = {"url": "/array/complex/null"} # type: ignore
@distributed_trace
def get_complex_empty(
self, **kwargs # type: Any
):
# type: (...) -> List[JSONType]
"""Get empty array of complex type [].
:return: list of JSON object
:rtype: list[JSONType]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
{
"integer": 0, # Optional.
"string": "str" # Optional.
}
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[JSONType]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_complex_empty_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_complex_empty.metadata = {"url": "/array/complex/empty"} # type: ignore
@distributed_trace
def get_complex_item_null(
self, **kwargs # type: Any
):
# type: (...) -> List[JSONType]
"""Get array of complex type with null item [{'integer': 1 'string': '2'}, null, {'integer': 5,
'string': '6'}].
:return: list of JSON object
:rtype: list[JSONType]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
{
"integer": 0, # Optional.
"string": "str" # Optional.
}
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[JSONType]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_complex_item_null_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_complex_item_null.metadata = {"url": "/array/complex/itemnull"} # type: ignore
@distributed_trace
def get_complex_item_empty(
self, **kwargs # type: Any
):
# type: (...) -> List[JSONType]
"""Get array of complex type with empty item [{'integer': 1 'string': '2'}, {}, {'integer': 5,
'string': '6'}].
:return: list of JSON object
:rtype: list[JSONType]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
{
"integer": 0, # Optional.
"string": "str" # Optional.
}
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[JSONType]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_complex_item_empty_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_complex_item_empty.metadata = {"url": "/array/complex/itemempty"} # type: ignore
@distributed_trace
def get_complex_valid(
self, **kwargs # type: Any
):
# type: (...) -> List[JSONType]
"""Get array of complex type with [{'integer': 1 'string': '2'}, {'integer': 3, 'string': '4'},
{'integer': 5, 'string': '6'}].
:return: list of JSON object
:rtype: list[JSONType]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
{
"integer": 0, # Optional.
"string": "str" # Optional.
}
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[JSONType]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_complex_valid_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_complex_valid.metadata = {"url": "/array/complex/valid"} # type: ignore
@distributed_trace
def put_complex_valid(
self,
array_body, # type: List[JSONType]
**kwargs # type: Any
):
# type: (...) -> None
"""Put an array of complex type with values [{'integer': 1 'string': '2'}, {'integer': 3,
'string': '4'}, {'integer': 5, 'string': '6'}].
:param array_body:
:type array_body: list[JSONType]
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
array_body = [
{
"integer": 0, # Optional.
"string": "str" # Optional.
}
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
_json = array_body
request = build_array_put_complex_valid_request(
content_type=content_type,
json=_json,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
put_complex_valid.metadata = {"url": "/array/complex/valid"} # type: ignore
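# --- Hedged sketch of the `cls` hook: the generated methods invoke
# cls(pipeline_response, deserialized, {}), so a caller can intercept the raw
# pipeline response alongside the deserialized body ---
def with_status(pipeline_response, deserialized, _headers):
    # Matches the cls(pipeline_response, deserialized, {}) call sites above
    return pipeline_response.http_response.status_code, deserialized
# status, body = client.array.get_complex_valid(cls=with_status)  # hypothetical client/group name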
@distributed_trace
def get_array_null(
self, **kwargs # type: Any
):
# type: (...) -> List[List[str]]
"""Get a null array.
:return: list of list of str
:rtype: list[list[str]]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
[
"str" # Optional.
]
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[List[str]]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_array_null_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_array_null.metadata = {"url": "/array/array/null"} # type: ignore
@distributed_trace
def get_array_empty(
self, **kwargs # type: Any
):
# type: (...) -> List[List[str]]
"""Get an empty array [].
:return: list of list of str
:rtype: list[list[str]]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
[
"str" # Optional.
]
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[List[str]]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_array_empty_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_array_empty.metadata = {"url": "/array/array/empty"} # type: ignore
@distributed_trace
def get_array_item_null(
self, **kwargs # type: Any
):
# type: (...) -> List[List[str]]
"""Get an array of array of strings [['1', '2', '3'], null, ['7', '8', '9']].
:return: list of list of str
:rtype: list[list[str]]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
[
"str" # Optional.
]
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[List[str]]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_array_item_null_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_array_item_null.metadata = {"url": "/array/array/itemnull"} # type: ignore
@distributed_trace
def get_array_item_empty(
self, **kwargs # type: Any
):
# type: (...) -> List[List[str]]
"""Get an array of array of strings [['1', '2', '3'], [], ['7', '8', '9']].
:return: list of list of str
:rtype: list[list[str]]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
[
"str" # Optional.
]
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[List[str]]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_array_item_empty_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_array_item_empty.metadata = {"url": "/array/array/itemempty"} # type: ignore
@distributed_trace
def get_array_valid(
self, **kwargs # type: Any
):
# type: (...) -> List[List[str]]
"""Get an array of array of strings [['1', '2', '3'], ['4', '5', '6'], ['7', '8', '9']].
:return: list of list of str
:rtype: list[list[str]]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
[
"str" # Optional.
]
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[List[str]]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_array_valid_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_array_valid.metadata = {"url": "/array/array/valid"} # type: ignore
@distributed_trace
def put_array_valid(
self,
array_body, # type: List[List[str]]
**kwargs # type: Any
):
# type: (...) -> None
"""Put An array of array of strings [['1', '2', '3'], ['4', '5', '6'], ['7', '8', '9']].
:param array_body:
:type array_body: list[list[str]]
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
array_body = [
[
"str" # Optional.
]
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
_json = array_body
request = build_array_put_array_valid_request(
content_type=content_type,
json=_json,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
put_array_valid.metadata = {"url": "/array/array/valid"} # type: ignore
@distributed_trace
def get_dictionary_null(
self, **kwargs # type: Any
):
# type: (...) -> List[Dict[str, str]]
"""Get an array of Dictionaries with value null.
:return: list of dict mapping str to str
:rtype: list[dict[str, str]]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
{
"str": "str" # Optional.
}
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[Dict[str, str]]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_dictionary_null_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_dictionary_null.metadata = {"url": "/array/dictionary/null"} # type: ignore
@distributed_trace
def get_dictionary_empty(
self, **kwargs # type: Any
):
# type: (...) -> List[Dict[str, str]]
"""Get an array of Dictionaries of type <string, string> with value [].
:return: list of dict mapping str to str
:rtype: list[dict[str, str]]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
{
"str": "str" # Optional.
}
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[Dict[str, str]]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_dictionary_empty_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_dictionary_empty.metadata = {"url": "/array/dictionary/empty"} # type: ignore
@distributed_trace
def get_dictionary_item_null(
self, **kwargs # type: Any
):
# type: (...) -> List[Dict[str, str]]
"""Get an array of Dictionaries of type <string, string> with value [{'1': 'one', '2': 'two', '3':
'three'}, null, {'7': 'seven', '8': 'eight', '9': 'nine'}].
:return: list of dict mapping str to str
:rtype: list[dict[str, str]]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
{
"str": "str" # Optional.
}
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[Dict[str, str]]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_dictionary_item_null_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_dictionary_item_null.metadata = {"url": "/array/dictionary/itemnull"} # type: ignore
@distributed_trace
def get_dictionary_item_empty(
self, **kwargs # type: Any
):
# type: (...) -> List[Dict[str, str]]
"""Get an array of Dictionaries of type <string, string> with value [{'1': 'one', '2': 'two', '3':
'three'}, {}, {'7': 'seven', '8': 'eight', '9': 'nine'}].
:return: list of dict mapping str to str
:rtype: list[dict[str, str]]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
{
"str": "str" # Optional.
}
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[Dict[str, str]]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_dictionary_item_empty_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_dictionary_item_empty.metadata = {"url": "/array/dictionary/itemempty"} # type: ignore
@distributed_trace
def get_dictionary_valid(
self, **kwargs # type: Any
):
# type: (...) -> List[Dict[str, str]]
"""Get an array of Dictionaries of type <string, string> with value [{'1': 'one', '2': 'two', '3':
'three'}, {'4': 'four', '5': 'five', '6': 'six'}, {'7': 'seven', '8': 'eight', '9': 'nine'}].
:return: list of dict mapping str to str
:rtype: list[dict[str, str]]
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == [
{
"str": "str" # Optional.
}
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[List[Dict[str, str]]]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
request = build_array_get_dictionary_valid_request()
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_dictionary_valid.metadata = {"url": "/array/dictionary/valid"} # type: ignore
@distributed_trace
def put_dictionary_valid(
self,
array_body, # type: List[Dict[str, str]]
**kwargs # type: Any
):
# type: (...) -> None
"""Get an array of Dictionaries of type <string, string> with value [{'1': 'one', '2': 'two', '3':
'three'}, {'4': 'four', '5': 'five', '6': 'six'}, {'7': 'seven', '8': 'eight', '9': 'nine'}].
:param array_body:
:type array_body: list[dict[str, str]]
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
array_body = [
{
"str": "str" # Optional.
}
]
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
_json = array_body
request = build_array_put_dictionary_valid_request(
content_type=content_type,
json=_json,
)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
put_dictionary_valid.metadata = {"url": "/array/dictionary/valid"} # type: ignore
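# --- Hedged end-to-end sketch (assumptions: `client` is the generated service
# client and this operations group is reachable as `client.array`) ---
from azure.core.exceptions import HttpResponseError
def round_trip_dictionaries(client):
    try:
        client.array.put_dictionary_valid([{"1": "one"}, {"2": "two"}])
        return client.array.get_dictionary_valid()
    except HttpResponseError as exc:
        print("request failed:", exc)
        return None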
167e7b6d78021be49e8bce59ce95eb9cac4c285b | 70,198 | py | Python | planit/migrations/0001_initial.py | Class-Planit/class-planit | 76df43c9a8b4e69b420ab9ae1d1eeb723ae4cccd | ["MIT"] | null | null | null
# Generated by Django 3.2.4 on 2021-08-09 15:45
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import tinymce.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('is_parent', models.BooleanField(default=False)),
('is_teacher', models.BooleanField(default=False)),
('is_admin', models.BooleanField(default=False)),
('is_destrict', models.BooleanField(default=False)),
('is_student', models.BooleanField(default=False)),
('is_archived', models.BooleanField(default=False)),
('is_pending', models.BooleanField(default=False)),
('is_demo', models.BooleanField(default=False)),
('city', models.CharField(max_length=100)),
('state', models.CharField(max_length=100)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='academicYear',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('start_date', models.DateField(blank=True, null=True)),
('end_date', models.DateField(blank=True, null=True)),
('is_active', models.BooleanField(default=True)),
('planning_teacher', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='bloomsLevel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=20, null=True)),
('group', models.CharField(blank=True, max_length=50, null=True)),
('color', models.CharField(blank=True, max_length=50, null=True)),
],
),
migrations.CreateModel(
name='classroom',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('classroom_title', models.CharField(max_length=100)),
('is_active', models.BooleanField(default=True)),
('academic_year', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.academicyear')),
],
),
migrations.CreateModel(
name='gradeLevel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('grade', models.CharField(max_length=30)),
('grade_labels', models.CharField(blank=True, max_length=50, null=True)),
('grade_image', models.ImageField(blank=True, null=True, upload_to='images/grades/')),
],
),
migrations.CreateModel(
name='LearningDemonstration',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.CharField(blank=True, max_length=1000, null=True)),
('topic_id', models.IntegerField(blank=True, default=0, null=True)),
('is_admin', models.BooleanField(default=False)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='LearningDemonstrationTemplate',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.CharField(blank=True, max_length=1000, null=True)),
('is_plural', models.BooleanField(default=False)),
('is_multi', models.BooleanField(default=False)),
('grade_level', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.gradelevel')),
],
),
migrations.CreateModel(
name='learningStyle',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=20, null=True)),
('group', models.CharField(blank=True, max_length=50, null=True)),
('color', models.CharField(blank=True, max_length=50, null=True)),
],
),
migrations.CreateModel(
name='lessonObjective',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('week_of', models.IntegerField(blank=True, default=0, null=True)),
('is_skill', models.BooleanField(default=False)),
('objective_title', models.CharField(blank=True, max_length=200, null=True)),
('teacher_objective', models.CharField(blank=True, max_length=1500, null=True)),
('current_grade_level', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.gradelevel')),
('lesson_classroom', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.classroom')),
('objectives_demonstration', models.ManyToManyField(blank=True, related_name='objectives_demonstration', to='planit.LearningDemonstration')),  # null=True removed: it has no effect on ManyToManyField
],
),
migrations.CreateModel(
name='lessonPDFImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image_image', models.ImageField(blank=True, null=True, upload_to='images/words/')),
('matched_lesson', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.lessonobjective')),
],
),
migrations.CreateModel(
name='lessonSection',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order_num', models.CharField(blank=True, max_length=5, null=True)),
('title', models.CharField(blank=True, max_length=150, null=True)),
('content', models.CharField(blank=True, max_length=1000, null=True)),
],
),
migrations.CreateModel(
name='lessonSectionTemplate',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=300, null=True)),
('is_admin', models.BooleanField(default=False)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('lesson_section', models.ManyToManyField(blank=True, to='planit.lessonSection')),
],
),
migrations.CreateModel(
name='mainQuestion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question', models.CharField(max_length=500)),
('answer', models.CharField(blank=True, max_length=1000, null=True)),
],
),
migrations.CreateModel(
name='multipleIntelligence',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('mi', models.CharField(blank=True, max_length=200, null=True)),
('mi_label', models.CharField(blank=True, max_length=100, null=True)),
],
),
migrations.CreateModel(
name='questionType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('item', models.CharField(blank=True, max_length=200, null=True)),
],
),
migrations.CreateModel(
name='shortStorySection',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=500, null=True)),
('text', models.CharField(blank=True, max_length=5000, null=True)),
],
),
migrations.CreateModel(
name='singleStandard',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('standard_identifier', models.CharField(blank=True, max_length=100, null=True)),
('skill_topic', models.TextField(max_length=200)),
('standard_objective', models.CharField(blank=True, max_length=1000, null=True)),
('competency', models.TextField(max_length=1000)),
('grade_level', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.gradelevel')),
],
),
migrations.CreateModel(
name='standardSet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Location', models.TextField(max_length=500)),
],
),
migrations.CreateModel(
name='standardSubjects',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('subject_title', models.CharField(max_length=100)),
('is_admin', models.BooleanField(default=False)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('grade_level', models.ManyToManyField(blank=True, related_name='subject_grades', to='planit.gradeLevel')),
('standards_set', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.standardset')),
],
),
migrations.CreateModel(
name='storyFull',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Title', models.CharField(blank=True, max_length=500, null=True)),
],
),
migrations.CreateModel(
name='storySection',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Title', models.CharField(blank=True, max_length=1000, null=True)),
('Section', models.CharField(blank=True, max_length=10000, null=True)),
('Section_Image', models.ImageField(blank=True, null=True, upload_to='images/story/')),
],
),
migrations.CreateModel(
name='studentPraiseTheme',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_image', models.ImageField(blank=True, null=True, upload_to='images/praise/')),
('theme_title', models.CharField(blank=True, max_length=200, null=True)),
],
),
migrations.CreateModel(
name='studentQuestionAnswer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nickname', models.CharField(blank=True, max_length=200, null=True)),
('question', models.CharField(blank=True, max_length=200, null=True)),
('correct', models.CharField(blank=True, max_length=200, null=True)),
('answer', models.CharField(blank=True, max_length=200, null=True)),
('is_graded', models.BooleanField(default=False)),
('is_correct', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='studentWorksheetAnswerFull',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('week_of', models.IntegerField(blank=True, default=0, null=True)),
('nickname', models.CharField(blank=True, max_length=200, null=True)),
('correct_points', models.IntegerField(blank=True, default=0, null=True)),
('total_possible', models.IntegerField(blank=True, default=0, null=True)),
('score', models.IntegerField(blank=True, default=0, null=True)),
('is_graded', models.BooleanField(default=False)),
('is_passing', models.BooleanField(default=False)),
('is_submitted', models.BooleanField(default=False)),
('assignment_num', models.IntegerField(blank=True, default=0, null=True)),
('completion_date', models.DateField(blank=True, null=True)),
('assigned_date', models.DateField(blank=True, null=True)),
('due_date', models.DateField(blank=True, null=True)),
('assigned_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='assigned_by', to=settings.AUTH_USER_MODEL)),
('student', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='student_info', to=settings.AUTH_USER_MODEL)),
('student_answers', models.ManyToManyField(blank=True, related_name='student_answers', to='planit.studentQuestionAnswer')),  # null=True removed: it has no effect on ManyToManyField
],
),
migrations.CreateModel(
name='teacherQuestionnaire',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('current_grade', models.CharField(blank=True, max_length=100, null=True)),
('current_state', models.CharField(blank=True, max_length=100, null=True)),
('current_planning', models.TextField(blank=True, max_length=1000, null=True)),
('happy_scale', models.CharField(blank=True, max_length=50, null=True)),
('lesson_good', models.TextField(blank=True, max_length=1000, null=True)),
('lesson_bad', models.TextField(blank=True, max_length=1000, null=True)),
('ideal_feature', models.TextField(blank=True, max_length=1000, null=True)),
],
),
migrations.CreateModel(
name='textBookBackground',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('line_counter', models.IntegerField(blank=True, default=0, null=True)),
('page_counter', models.IntegerField(blank=True, default=0, null=True)),
('section', models.CharField(blank=True, max_length=500, null=True)),
('header', models.CharField(blank=True, max_length=500, null=True)),
('line_text', models.CharField(blank=True, max_length=1000, null=True)),
('line_lemma', models.CharField(blank=True, max_length=1000, null=True)),
('term_created', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='textBookTitle',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.TextField(max_length=500)),
('lesson_id_num', models.IntegerField(blank=True, default=0, null=True)),
('is_admin', models.BooleanField(default=False)),
('wiki_page', tinymce.models.HTMLField(blank=True, null=True)),
('prim_topic_id', models.IntegerField(blank=True, default=0, null=True)),
('grade_level', models.ManyToManyField(blank=True, related_name='book_grade', to='planit.gradeLevel')),
('standards_set', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.standardset')),
('subject', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.standardsubjects')),
('uploaded_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='topicDescription',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('topic_id', models.IntegerField(blank=True, default=0, null=True)),
('description', models.CharField(blank=True, max_length=1000, null=True)),
('is_admin', models.BooleanField(default=True)),
('is_gen', models.BooleanField(default=False)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='topicInformation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('topic', models.CharField(blank=True, max_length=500, null=True)),
('item', models.CharField(blank=True, max_length=200, null=True)),
('image_name', models.CharField(blank=True, max_length=500, null=True)),
('image_url', models.URLField(blank=True, max_length=500, null=True)),
('image_file', models.ImageField(blank=True, null=True, upload_to='images/topic/')),
('is_admin', models.BooleanField(default=True)),
('from_wiki', models.BooleanField(default=False)),
('is_secondary', models.BooleanField(default=False)),
('topic_id', models.IntegerField(blank=True, default=0, null=True)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('description', models.ManyToManyField(blank=True, to='planit.topicDescription')),
('grade_level', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.gradelevel')),
('standard_set', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.standardset')),
('subject', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.standardsubjects')),
('text_index', models.ManyToManyField(blank=True, to='planit.textBookBackground')),
],
),
migrations.CreateModel(
name='topicQuestionitem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('item', models.CharField(blank=True, max_length=200, null=True)),
('question_points', models.IntegerField(blank=True, default=1, null=True)),
('Question', models.CharField(blank=True, max_length=2500, null=True)),
('Correct', models.CharField(blank=True, max_length=1000, null=True)),
('Incorrect_One', models.CharField(blank=True, max_length=1000, null=True)),
('Incorrect_Two', models.CharField(blank=True, max_length=1000, null=True)),
('Incorrect_Three', models.CharField(blank=True, max_length=1000, null=True)),
('explanation', models.CharField(blank=True, max_length=1500, null=True)),
('is_admin', models.BooleanField(default=True)),
('original_num', models.IntegerField(blank=True, default=0, null=True)),
('trans_line_num', models.IntegerField(blank=True, default=0, null=True)),
('is_video', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='topicTypes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('item', models.CharField(blank=True, max_length=200, null=True)),
],
),
migrations.CreateModel(
name='userImageUpload',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=20, null=True)),
('uploaded_image', models.ImageField(blank=True, null=True, upload_to='images/question/')),
('image_url', models.URLField(blank=True, max_length=500, null=True)),
('uploaded_date', models.DateField(blank=True, null=True)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='userNickname',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=30, null=True)),
('is_firstname', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='waitlistUserInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('city', models.CharField(max_length=100)),
('state', models.CharField(max_length=100)),
('country', models.CharField(max_length=100)),
('grade_levels', models.CharField(max_length=100)),
('subjects', models.CharField(max_length=100)),
('school_name', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='youtubeLine',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('vid_id', models.CharField(max_length=1000)),
('line_num', models.IntegerField(blank=True, default=0, null=True)),
('transcript_text', models.CharField(max_length=500)),
],
),
migrations.CreateModel(
name='youtubeSearchResult',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_selected', models.BooleanField(default=False)),
('title', models.CharField(max_length=100)),
('description', models.CharField(max_length=500)),
('link', models.CharField(blank=True, max_length=500, null=True)),
('vid_id', models.CharField(max_length=1000)),
('lesson_plan', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.lessonobjective')),
('transcript_lines', models.ManyToManyField(blank=True, to='planit.youtubeLine')),
],
),
migrations.CreateModel(
name='worksheetTheme',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=50, null=True)),
('primary', models.CharField(blank=True, max_length=12, null=True)),
('background_color', models.CharField(blank=True, max_length=12, null=True)),
('secondary', models.CharField(blank=True, max_length=12, null=True)),
('link', models.CharField(blank=True, max_length=12, null=True)),
('is_admin', models.BooleanField(default=False)),
('is_seasonal', models.BooleanField(default=False)),
('is_active', models.BooleanField(default=False)),
('background_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='background_image', to='planit.userimageupload')),
('demo_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='demo_image', to='planit.userimageupload')),
('first_name', models.ManyToManyField(blank=True, related_name='first_name', to='planit.userNickname')),
('icon_image', models.ManyToManyField(blank=True, related_name='icon_image', to='planit.userImageUpload')),
('last_name', models.ManyToManyField(blank=True, related_name='last_name', to='planit.userNickname')),
],
),
migrations.CreateModel(
name='worksheetSection',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('section_title', models.CharField(blank=True, max_length=200, null=True)),
('directions', models.CharField(blank=True, max_length=1000, null=True)),
('section_image', models.ImageField(blank=True, null=True, upload_to='images/question/')),
('questions', models.ManyToManyField(blank=True, to='planit.topicQuestionitem')),
],
),
migrations.CreateModel(
name='worksheetFull',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_admin', models.BooleanField(default=False)),
('is_complete', models.BooleanField(default=False)),
('is_assigned', models.BooleanField(default=False)),
('title', models.CharField(blank=True, max_length=200, null=True)),
('ws_description', models.CharField(blank=True, max_length=500, null=True)),
('total_possible', models.IntegerField(blank=True, default=0, null=True)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('grade_level', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.gradelevel')),
('lesson_overview', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.lessonobjective')),
('questions', models.ManyToManyField(blank=True, to='planit.topicQuestionitem')),
('standards_set', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.standardset')),
('subject', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.standardsubjects')),
('worksheet_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='worksheet_image', to='planit.userimageupload')),
('worksheet_theme', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='planit.worksheettheme')),
],
),
migrations.CreateModel(
name='worksheetClassAssignment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('week_of', models.IntegerField(blank=True, default=0, null=True)),
('total_possible', models.IntegerField(blank=True, default=0, null=True)),
('due_date', models.DateField(blank=True, null=True)),
('academic_year', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.academicyear')),
('assigned_classrooms', models.ManyToManyField(blank=True, to='planit.classroom')),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('lesson_overview', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.lessonobjective')),
('student_answers', models.ManyToManyField(blank=True, to='planit.studentWorksheetAnswerFull')),
('subject', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.standardsubjects')),
('worksheet_full', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='planit.worksheetfull')),
],
),
migrations.CreateModel(
name='wikiTopic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_selected', models.BooleanField(default=False)),
('topic', models.CharField(max_length=1000)),
('term', models.CharField(blank=True, max_length=300, null=True)),
('relevance', models.IntegerField(blank=True, default=0, null=True)),
('lesson_plan', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.lessonobjective')),
('textbook_match', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.textbooktitle')),
],
),
migrations.CreateModel(
name='weeklyObjectives',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('week_of', models.CharField(max_length=10)),
('objectives', models.ManyToManyField(blank=True, to='planit.lessonObjective')),
('subject_classroom', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.classroom')),
],
),
migrations.CreateModel(
name='vocabularyWord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('word', models.CharField(max_length=50)),
('p_o_s', models.CharField(blank=True, max_length=50, null=True)),
('definition', models.CharField(blank=True, max_length=500, null=True)),
('sentence', models.CharField(blank=True, max_length=300, null=True)),
('audio_file', models.FileField(blank=True, null=True, upload_to='')),
('question_image', models.ImageField(blank=True, null=True, upload_to='images/words/')),
('synonyms', models.CharField(blank=True, max_length=300, null=True)),
('antonyms', models.CharField(blank=True, max_length=300, null=True)),
('matched_standard', models.ManyToManyField(blank=True, to='planit.singleStandard')),
],
),
migrations.CreateModel(
name='vocabularyList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('week_of', models.CharField(max_length=10)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('lesson_plan', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.lessonobjective')),
('subject', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.standardsubjects')),
('vocab_words', models.ManyToManyField(blank=True, to='planit.vocabularyWord')),
],
),
migrations.AddField(
model_name='topicquestionitem',
name='Question_Image',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='question_image', to='planit.userimageupload'),
),
migrations.AddField(
model_name='topicquestionitem',
name='correct_image',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='correct_image', to='planit.userimageupload'),
),
migrations.AddField(
model_name='topicquestionitem',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='topicquestionitem',
name='grade_level',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.gradelevel'),
),
migrations.AddField(
model_name='topicquestionitem',
name='in_one_image',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='Incorrect_One_image', to='planit.userimageupload'),
),
migrations.AddField(
model_name='topicquestionitem',
name='in_three_image',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='Incorrect_Three_image', to='planit.userimageupload'),
),
migrations.AddField(
model_name='topicquestionitem',
name='in_two_image',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='Incorrect_Two_image', to='planit.userimageupload'),
),
migrations.AddField(
model_name='topicquestionitem',
name='lesson_overview',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.lessonobjective'),
),
migrations.AddField(
model_name='topicquestionitem',
name='linked_text',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.textbookbackground'),
),
migrations.AddField(
model_name='topicquestionitem',
name='linked_topic',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.topicinformation'),
),
migrations.AddField(
model_name='topicquestionitem',
name='question_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.questiontype'),
),
migrations.AddField(
model_name='topicquestionitem',
name='standard_set',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.standardset'),
),
migrations.AddField(
model_name='topicquestionitem',
name='subject',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.standardsubjects'),
),
migrations.AddField(
model_name='topicquestionitem',
name='topic_story',
field=models.ManyToManyField(blank=True, to='planit.storyFull'),
),
migrations.AddField(
model_name='topicquestionitem',
name='topic_type',
field=models.ManyToManyField(blank=True, to='planit.topicTypes'),
),
migrations.CreateModel(
name='topicQuestion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('item', models.CharField(blank=True, max_length=200, null=True)),
('question_points', models.IntegerField(blank=True, default=1, null=True)),
('Question', models.CharField(blank=True, max_length=2500, null=True)),
('Correct', models.CharField(blank=True, max_length=1000, null=True)),
('Incorrect_One', models.CharField(blank=True, max_length=1000, null=True)),
('Incorrect_Two', models.CharField(blank=True, max_length=1000, null=True)),
('Incorrect_Three', models.CharField(blank=True, max_length=1000, null=True)),
('explanation', models.CharField(blank=True, max_length=1500, null=True)),
('is_admin', models.BooleanField(default=True)),
('original_num', models.IntegerField(blank=True, default=0, null=True)),
('trans_line_num', models.IntegerField(blank=True, default=0, null=True)),
('is_video', models.BooleanField(default=False)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('grade_level', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.gradelevel')),
('lesson_overview', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.lessonobjective')),
('linked_text', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.textbookbackground')),
('linked_topic', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.topicinformation')),
('question_type', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.questiontype')),
('standard_set', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.standardset')),
('subject', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.standardsubjects')),
('topic_story', models.ManyToManyField(blank=True, to='planit.storyFull')),
('topic_type', models.ManyToManyField(blank=True, to='planit.topicTypes')),
],
),
migrations.AddField(
model_name='topicinformation',
name='topic_type',
field=models.ManyToManyField(blank=True, to='planit.topicTypes'),
),
migrations.AddField(
model_name='textbookbackground',
name='textbook',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='planit.textbooktitle'),
),
migrations.CreateModel(
name='teacherLessonTemplates',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lesson_section', models.ManyToManyField(blank=True, to='planit.lessonSectionTemplate')),
('planning_teacher', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='teacherInvitations',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('invite_ref', models.CharField(blank=True, max_length=255, null=True, verbose_name='Teacher Reference')),
('first_name', models.CharField(blank=True, max_length=50, null=True)),
('last_name', models.CharField(blank=True, max_length=50, null=True)),
('email', models.EmailField(blank=True, max_length=254, null=True)),
('is_pending', models.BooleanField(default=True)),
('is_waitlist', models.BooleanField(default=False)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='created_by1', to=settings.AUTH_USER_MODEL)),
('for_classroom', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.classroom')),
('new_user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='new_user1', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='teacherInvitation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('invite_ref', models.CharField(blank=True, max_length=255, null=True, verbose_name='Teacher Reference')),
('first_name', models.CharField(blank=True, max_length=50, null=True)),
('last_name', models.CharField(blank=True, max_length=50, null=True)),
('email', models.EmailField(blank=True, max_length=254, null=True)),
('is_pending', models.BooleanField(default=True)),
('is_waitlist', models.BooleanField(default=False)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='created_by', to=settings.AUTH_USER_MODEL)),
('for_classroom', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.classroom')),
('new_user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='new_user', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='studentworksheetanswerfull',
name='worksheet_assignment',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='planit.worksheetfull'),
),
migrations.AddField(
model_name='studentquestionanswer',
name='question_num',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='planit.topicquestionitem'),
),
migrations.AddField(
model_name='studentquestionanswer',
name='student',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='studentquestionanswer',
name='worksheet_assignment',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='planit.worksheetfull'),
),
migrations.CreateModel(
name='studentProfiles',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('student_ref', models.CharField(blank=True, max_length=255, null=True, verbose_name='Student Reference')),
('first_name', models.CharField(blank=True, max_length=50, null=True)),
('last_name', models.CharField(blank=True, max_length=50, null=True)),
('middle_names', models.CharField(blank=True, max_length=200, null=True)),
('date_of_birth', models.DateField(blank=True, null=True)),
('gender', models.CharField(blank=True, max_length=30, null=True)),
('student_pin', models.IntegerField(blank=True, default=0, null=True)),
('is_enrolled', models.BooleanField(default=False)),
('is_suspended', models.BooleanField(default=False)),
('is_deleted', models.BooleanField(default=False)),
('is_pending', models.BooleanField(default=False)),
('current_grade_level', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.gradelevel')),
('student_username', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='student_username', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='studentPraise',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('theme_id', models.IntegerField(blank=True, default=0, null=True)),
('created_by', models.IntegerField(blank=True, default=0, null=True)),
('sent_date', models.DateField(blank=True, null=True)),
('week_of', models.IntegerField(blank=True, default=0, null=True)),
('student', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='studentInvitation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('invite_ref', models.CharField(blank=True, max_length=255, null=True, verbose_name='Student Reference')),
('first_name', models.CharField(blank=True, max_length=50, null=True)),
('last_name', models.CharField(blank=True, max_length=50, null=True)),
('email', models.EmailField(blank=True, max_length=254, null=True)),
('is_pending', models.BooleanField(default=True)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('for_classroom', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.classroom')),
('grade_level', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='planit.gradelevel')),
('student_profile', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='planit.studentprofiles')),
],
),
migrations.AddField(
model_name='storyfull',
name='section',
field=models.ManyToManyField(blank=True, to='planit.storySection'),
),
migrations.AddField(
model_name='singlestandard',
name='standards_set',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.standardset'),
),
migrations.AddField(
model_name='singlestandard',
name='subject',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.standardsubjects'),
),
migrations.CreateModel(
name='singleRec',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sim_score', models.CharField(blank=True, max_length=100, null=True)),
('is_displayed', models.BooleanField(default=False)),
('single_rec_topics', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.topicinformation')),
],
),
migrations.CreateModel(
name='shortStory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=500, null=True)),
('author', models.CharField(blank=True, max_length=200, null=True)),
('story_image', models.ImageField(blank=True, null=True, upload_to='images/story/')),
('story_sections', models.ManyToManyField(blank=True, related_name='objectives_standards', to='planit.shortStorySection')),
],
),
migrations.CreateModel(
name='selectedActivity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lesson_text', models.CharField(blank=True, max_length=1500, null=True)),
('verb', models.CharField(blank=True, max_length=100, null=True)),
('work_product', models.CharField(blank=True, max_length=300, null=True)),
('ks_demo', models.CharField(blank=True, max_length=1000, null=True)),
('bloom', models.IntegerField(blank=True, default=0, null=True)),
('mi', models.IntegerField(blank=True, default=0, null=True)),
('ret_rate', models.IntegerField(blank=True, default=0, null=True)),
('template_id', models.IntegerField(blank=True, default=0, null=True)),
('demo_num', models.IntegerField(blank=True, default=0, null=True)),
('is_admin', models.BooleanField(default=False)),
('is_selected', models.BooleanField(default=False)),
('is_rejected', models.BooleanField(default=False)),
('mi_labels', models.CharField(blank=True, max_length=50, null=True)),
('bl_labels', models.CharField(blank=True, max_length=50, null=True)),
('bl_color', models.CharField(blank=True, max_length=50, null=True)),
('mi_color', models.CharField(blank=True, max_length=50, null=True)),
('mi_icon', models.CharField(blank=True, max_length=50, null=True)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('lesson_overview', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.lessonobjective')),
('objectives_topics', models.ManyToManyField(blank=True, related_name='activitiy_topic', to='planit.topicInformation')),
('story', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.shortstory')),
],
),
migrations.CreateModel(
name='reccomendedTopics',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('searched_level', models.IntegerField(blank=True, default=1, null=True)),
('matched_lesson', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.lessonobjective')),
('rec_topics', models.ManyToManyField(blank=True, related_name='reccomended_topics', to='planit.topicInformation')),
('removed_topics', models.ManyToManyField(blank=True, related_name='removed_topics', to='planit.topicInformation')),
('single_score', models.ManyToManyField(blank=True, to='planit.singleRec')),
],
),
migrations.CreateModel(
name='questionList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('week_of', models.CharField(max_length=10)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('lesson_plan', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.lessonobjective')),
('main_questions', models.ManyToManyField(blank=True, to='planit.mainQuestion')),
('subject', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.standardsubjects')),
],
),
migrations.CreateModel(
name='matchedTopics',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lesson_overview', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.lessonobjective')),
('objectives_topics', models.ManyToManyField(blank=True, related_name='objectives_activity_topic', to='planit.topicInformation')),
],
),
migrations.CreateModel(
name='lessonText',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_initial', models.BooleanField(default=True)),
('overview', tinymce.models.HTMLField(blank=True, null=True)),
('introduction', tinymce.models.HTMLField(blank=True, null=True)),
('activities', tinymce.models.HTMLField(blank=True, null=True)),
('lesson_terms', tinymce.models.HTMLField(blank=True, null=True)),
('is_plural', models.BooleanField(default=False)),
('is_multi', models.BooleanField(default=False)),
('matched_lesson', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.lessonobjective')),
],
),
migrations.CreateModel(
name='lessonTemplates',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('wording', models.CharField(blank=True, max_length=1000, null=True)),
('verb', models.CharField(blank=True, max_length=100, null=True)),
('work_product', models.CharField(blank=True, max_length=300, null=True)),
('grouping', models.CharField(blank=True, max_length=500, null=True)),
('bloom', models.IntegerField(blank=True, default=0, null=True)),
('mi', models.IntegerField(blank=True, default=0, null=True)),
('is_plural', models.BooleanField(default=False)),
('is_multi', models.BooleanField(default=False)),
('components', models.ManyToManyField(blank=True, related_name='multi_topics', to='planit.topicTypes')),
('grade_level', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.gradelevel')),
('ks_demo', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.learningdemonstrationtemplate')),
('single_topic', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.topictypes')),
('story', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.shortstory')),
('subject', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.standardsubjects')),
],
),
migrations.CreateModel(
name='lessonStandardRecommendation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_selected', models.BooleanField(default=False)),
('lesson_classroom', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.classroom')),
('objectives', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.lessonobjective')),
('objectives_standard', models.ManyToManyField(blank=True, to='planit.singleStandard')),
],
),
migrations.CreateModel(
name='lessonProduct',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('product', models.CharField(blank=True, max_length=200, null=True)),
('mi', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.multipleintelligence')),
('topic_type', models.ManyToManyField(blank=True, to='planit.topicTypes')),
],
),
migrations.CreateModel(
name='lessonPDFText',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pdf_doc', models.FileField(blank=True, null=True, upload_to='file/')),
('content', models.CharField(blank=True, max_length=25000, null=True)),
('matched_lesson', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.lessonobjective')),
('pdf_images', models.ManyToManyField(blank=True, to='planit.lessonPDFImage')),
],
),
migrations.AddField(
model_name='lessonobjective',
name='objectives_standards',
field=models.ManyToManyField(blank=True, related_name='objectives_standards', to='planit.singleStandard'),
),
migrations.AddField(
model_name='lessonobjective',
name='objectives_topics',
field=models.ManyToManyField(blank=True, related_name='objectives_topic', to='planit.topicInformation'),
),
migrations.AddField(
model_name='lessonobjective',
name='standard_set',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.standardset'),
),
migrations.AddField(
model_name='lessonobjective',
name='subject',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.standardsubjects'),
),
migrations.CreateModel(
name='lessonImageUpload',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image_image', models.ImageField(blank=True, null=True, upload_to='images/words/')),
('content', models.CharField(blank=True, max_length=2000, null=True)),
('matched_lesson', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.lessonobjective')),
],
),
migrations.CreateModel(
name='lessonFull',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=300, null=True)),
('blooms', models.ManyToManyField(blank=True, to='planit.bloomsLevel')),
('lesson_overview', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.lessonobjective')),
('lesson_section', models.ManyToManyField(blank=True, to='planit.lessonSection')),
('main_questions', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.questionlist')),
('styles', models.ManyToManyField(blank=True, to='planit.learningStyle')),
('subject', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.standardsubjects')),
('vocabulary_list', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.vocabularylist')),
],
),
migrations.AddField(
model_name='learningdemonstrationtemplate',
name='subject',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.standardsubjects'),
),
migrations.AddField(
model_name='learningdemonstrationtemplate',
name='topic_type',
field=models.ManyToManyField(blank=True, to='planit.topicTypes'),
),
migrations.CreateModel(
name='keywordResults',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_selected', models.BooleanField(default=False)),
('word', models.CharField(blank=True, max_length=100, null=True)),
('p_o_s', models.CharField(blank=True, max_length=50, null=True)),
('definition', models.CharField(blank=True, max_length=600, null=True)),
('sentence', models.CharField(blank=True, max_length=600, null=True)),
('relevance', models.IntegerField(blank=True, default=0, null=True)),
('lesson_plan', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.lessonobjective')),
],
),
migrations.AddField(
model_name='gradelevel',
name='standards_set',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.standardset'),
),
migrations.CreateModel(
name='googleSearchResult',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_selected', models.BooleanField(default=False)),
('title', models.CharField(max_length=100)),
('link', models.CharField(max_length=500)),
('snippet', models.CharField(max_length=1000)),
('lesson_plan', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.lessonobjective')),
],
),
migrations.CreateModel(
name='googleRelatedQuestions',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question', models.CharField(max_length=500)),
('link', models.CharField(max_length=500)),
('snippet', models.CharField(max_length=1000)),
('relevance', models.IntegerField(blank=True, default=0, null=True)),
('is_selected', models.BooleanField(default=False)),
('lesson_plan', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.lessonobjective')),
],
),
migrations.CreateModel(
name='classroomLists',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('year', models.IntegerField(blank=True, default=0, null=True)),
('academic_year', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.academicyear')),
('lesson_classroom', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.classroom')),
('students', models.ManyToManyField(blank=True, related_name='classroom_students', to='planit.studentProfiles')),
],
),
migrations.AddField(
model_name='classroom',
name='grade_level',
field=models.ManyToManyField(blank=True, related_name='grade_level', to='planit.gradeLevel'),
),
migrations.AddField(
model_name='classroom',
name='main_teacher',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='main_teacher', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='classroom',
name='single_grade',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='single_grade', to='planit.gradelevel'),
),
migrations.AddField(
model_name='classroom',
name='standards_set',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.standardset'),
),
migrations.AddField(
model_name='classroom',
name='student',
field=models.ManyToManyField(blank=True, related_name='student', to='planit.studentProfiles'),
),
migrations.AddField(
model_name='classroom',
name='subjects',
field=models.ManyToManyField(blank=True, to='planit.standardSubjects'),
),
migrations.AddField(
model_name='classroom',
name='support_teachers',
field=models.ManyToManyField(blank=True, related_name='support_teacher', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='user',
name='standards_set',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.standardset'),
),
migrations.AddField(
model_name='user',
name='user_permissions',
field=models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions'),
),
migrations.CreateModel(
name='school_user',
fields=[
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='user_role', serialize=False, to='planit.user')),
('first_name', models.CharField(max_length=30)),
('last_name', models.CharField(max_length=30)),
('username', models.CharField(max_length=30)),
('email', models.EmailField(blank=True, max_length=254, null=True)),
('phone_number', models.CharField(blank=True, max_length=30, null=True)),
('use_whatsapp', models.BooleanField(default=False)),
('user_image', models.ImageField(blank=True, null=True, upload_to='images/')),
('standards_set', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='planit.standardset')),
],
),
]
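# Note: null=True is a no-op on ManyToManyField (Django flags it with the
# fields.W340 system check), so the many-to-many fields above declare only
# blank=True. ForeignKey fields that use on_delete=SET_NULL, by contrast,
# must allow NULL at the database level. A minimal sketch (illustrative
# field, not part of this app's models):
#
#     subject = models.ForeignKey('planit.standardsubjects', blank=True,
#                                 null=True, on_delete=models.SET_NULL)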
| 65.239777
| 329
| 0.619961
| 7,428
| 70,198
| 5.703689
| 0.059235
| 0.077962
| 0.049402
| 0.064602
| 0.863408
| 0.845682
| 0.807397
| 0.756202
| 0.735997
| 0.668775
| 0
| 0.010408
| 0.236275
| 70,198
| 1,075
| 330
| 65.300465
| 0.779844
| 0.000641
| 0
| 0.661049
| 1
| 0
| 0.157974
| 0.032002
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.001873
| 0.006554
| 0
| 0.0103
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
16807a6edcad738a063e4203b3c25e54df990619
| 5,310
|
py
|
Python
|
tests/snapshots/snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_DE-2017] 1.py
|
ghlecl/holidata
|
1db24d4aecab7ec7a007720987d84ffb0988b6db
|
[
"MIT"
] | null | null | null |
tests/snapshots/snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_DE-2017] 1.py
|
ghlecl/holidata
|
1db24d4aecab7ec7a007720987d84ffb0988b6db
|
[
"MIT"
] | null | null | null |
tests/snapshots/snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_DE-2017] 1.py
|
ghlecl/holidata
|
1db24d4aecab7ec7a007720987d84ffb0988b6db
|
[
"MIT"
] | null | null | null |
[
{
'date': '2017-01-01',
'description': 'Neujahr',
'locale': 'de-DE',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2017-01-06',
'description': 'Heilige drei Könige',
'locale': 'de-DE',
'notes': '',
'region': 'BW',
'type': 'NRF'
},
{
'date': '2017-01-06',
'description': 'Heilige drei Könige',
'locale': 'de-DE',
'notes': '',
'region': 'BY',
'type': 'NRF'
},
{
'date': '2017-01-06',
'description': 'Heilige drei Könige',
'locale': 'de-DE',
'notes': '',
'region': 'ST',
'type': 'NRF'
},
{
'date': '2017-04-14',
'description': 'Karfreitag',
'locale': 'de-DE',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2017-04-16',
'description': 'Ostern',
'locale': 'de-DE',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2017-04-17',
'description': 'Ostermontag',
'locale': 'de-DE',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2017-05-01',
'description': 'Erster Maifeiertag',
'locale': 'de-DE',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2017-05-25',
'description': 'Christi Himmelfahrt',
'locale': 'de-DE',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2017-06-04',
'description': 'Pfingstsonntag',
'locale': 'de-DE',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2017-06-05',
'description': 'Pfingstmontag',
'locale': 'de-DE',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2017-06-15',
'description': 'Fronleichnam',
'locale': 'de-DE',
'notes': '',
'region': 'BW',
'type': 'NRV'
},
{
'date': '2017-06-15',
'description': 'Fronleichnam',
'locale': 'de-DE',
'notes': '',
'region': 'BY',
'type': 'NRV'
},
{
'date': '2017-06-15',
'description': 'Fronleichnam',
'locale': 'de-DE',
'notes': '',
'region': 'HE',
'type': 'NRV'
},
{
'date': '2017-06-15',
'description': 'Fronleichnam',
'locale': 'de-DE',
'notes': '',
'region': 'NW',
'type': 'NRV'
},
{
'date': '2017-06-15',
'description': 'Fronleichnam',
'locale': 'de-DE',
'notes': '',
'region': 'RP',
'type': 'NRV'
},
{
'date': '2017-06-15',
'description': 'Fronleichnam',
'locale': 'de-DE',
'notes': '',
'region': 'SL',
'type': 'NRV'
},
{
'date': '2017-08-15',
'description': 'Mariä Himmelfahrt',
'locale': 'de-DE',
'notes': '',
'region': 'SL',
'type': 'NRF'
},
{
'date': '2017-10-03',
'description': 'Tag der Deutschen Einheit',
'locale': 'de-DE',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2017-10-31',
'description': 'Reformationstag',
'locale': 'de-DE',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2017-11-01',
'description': 'Allerheiligen',
'locale': 'de-DE',
'notes': '',
'region': 'BW',
'type': 'NRF'
},
{
'date': '2017-11-01',
'description': 'Allerheiligen',
'locale': 'de-DE',
'notes': '',
'region': 'BY',
'type': 'NRF'
},
{
'date': '2017-11-01',
'description': 'Allerheiligen',
'locale': 'de-DE',
'notes': '',
'region': 'NW',
'type': 'NRF'
},
{
'date': '2017-11-01',
'description': 'Allerheiligen',
'locale': 'de-DE',
'notes': '',
'region': 'RP',
'type': 'NRF'
},
{
'date': '2017-11-01',
'description': 'Allerheiligen',
'locale': 'de-DE',
'notes': '',
'region': 'SL',
'type': 'NRF'
},
{
'date': '2017-11-22',
'description': 'Buß- und Bettag',
'locale': 'de-DE',
'notes': '',
'region': 'SN',
'type': 'NRV'
},
{
'date': '2017-12-24',
'description': 'Heilig Abend',
'locale': 'de-DE',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2017-12-25',
'description': 'Weihnachtstag',
'locale': 'de-DE',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2017-12-26',
'description': 'Zweiter Weihnachtstag',
'locale': 'de-DE',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2017-12-31',
'description': 'Silvester',
'locale': 'de-DE',
'notes': '',
'region': '',
'type': 'NF'
}
]
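# A minimal sketch (not part of the snapshot itself) of how entries shaped
# like the list above can be grouped: an empty 'region' marks a nationwide
# holiday, while two-letter codes such as 'BW' or 'BY' mark federal states.
from collections import defaultdict

def group_by_region(holidays):
    """Group holiday dicts by 'region'; '' (nationwide) maps to 'DE'."""
    grouped = defaultdict(list)
    for holiday in holidays:
        grouped[holiday['region'] or 'DE'].append(holiday['description'])
    return dict(grouped)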
| 21.942149
| 51
| 0.369303
| 422
| 5,310
| 4.646919
| 0.156398
| 0.122387
| 0.152983
| 0.229475
| 0.775115
| 0.761346
| 0.750127
| 0.72718
| 0.72718
| 0.648139
| 0
| 0.075164
| 0.398682
| 5,310
| 242
| 52
| 21.942149
| 0.538992
| 0
| 0
| 0.599174
| 0
| 0
| 0.389381
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bc7fe213ec0444e2ed524a890619f4cffcd4cd42
| 5,582
|
py
|
Python
|
yolo3/models/yolo3_mobilenet.py
|
rootadminWalker/keras-YOLOv3-model-set
|
196ec711975e1821a260a9f6523008bf47ff8c84
|
[
"MIT"
] | null | null | null |
yolo3/models/yolo3_mobilenet.py
|
rootadminWalker/keras-YOLOv3-model-set
|
196ec711975e1821a260a9f6523008bf47ff8c84
|
[
"MIT"
] | null | null | null |
yolo3/models/yolo3_mobilenet.py
|
rootadminWalker/keras-YOLOv3-model-set
|
196ec711975e1821a260a9f6523008bf47ff8c84
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""YOLO_v3 MobileNet Model Defined in Keras."""
from tensorflow.keras.models import Model
from tensorflow.keras.applications.mobilenet import MobileNet
#from yolo3.models.layers import compose, DarknetConv2D, DarknetConv2D_BN_Leaky, Depthwise_Separable_Conv2D_BN_Leaky, make_last_layers, make_depthwise_separable_last_layers, make_spp_depthwise_separable_last_layers
from .layers import yolo3_predictions, yolo3lite_predictions, tiny_yolo3_predictions, tiny_yolo3lite_predictions
def yolo3_mobilenet_body(inputs, num_anchors, num_classes, alpha=1.0):
"""Create YOLO_V3 MobileNet model CNN body in Keras."""
mobilenet = MobileNet(input_tensor=inputs, weights='imagenet', include_top=False, alpha=alpha)
print('backbone layers number: {}'.format(len(mobilenet.layers)))
# input: 416 x 416 x 3
# conv_pw_13_relu :13 x 13 x (1024*alpha)
# conv_pw_11_relu :26 x 26 x (512*alpha)
# conv_pw_5_relu : 52 x 52 x (256*alpha)
# f1: 13 x 13 x (1024*alpha)
f1 = mobilenet.get_layer('conv_pw_13_relu').output
# f2: 26 x 26 x (512*alpha)
f2 = mobilenet.get_layer('conv_pw_11_relu').output
# f3: 52 x 52 x (256*alpha)
f3 = mobilenet.get_layer('conv_pw_5_relu').output
f1_channel_num = int(1024*alpha)
f2_channel_num = int(512*alpha)
f3_channel_num = int(256*alpha)
y1, y2, y3 = yolo3_predictions((f1, f2, f3), (f1_channel_num, f2_channel_num, f3_channel_num), num_anchors, num_classes)
return Model(inputs=inputs, outputs=[y1, y2, y3])
def yolo3lite_mobilenet_body(inputs, num_anchors, num_classes, alpha=1.0):
'''Create YOLO_v3 Lite MobileNet model CNN body in keras.'''
mobilenet = MobileNet(input_tensor=inputs, weights='imagenet', include_top=False, alpha=alpha)
print('backbone layers number: {}'.format(len(mobilenet.layers)))
# input: 416 x 416 x 3
# conv_pw_13_relu :13 x 13 x (1024*alpha)
# conv_pw_11_relu :26 x 26 x (512*alpha)
# conv_pw_5_relu : 52 x 52 x (256*alpha)
# f1: 13 x 13 x (1024*alpha)
f1 = mobilenet.get_layer('conv_pw_13_relu').output
# f2: 26 x 26 x (512*alpha)
f2 = mobilenet.get_layer('conv_pw_11_relu').output
# f3: 52 x 52 x (256*alpha)
f3 = mobilenet.get_layer('conv_pw_5_relu').output
f1_channel_num = int(1024*alpha)
f2_channel_num = int(512*alpha)
f3_channel_num = int(256*alpha)
y1, y2, y3 = yolo3lite_predictions((f1, f2, f3), (f1_channel_num, f2_channel_num, f3_channel_num), num_anchors, num_classes)
return Model(inputs=inputs, outputs=[y1, y2, y3])
def yolo3lite_spp_mobilenet_body(inputs, num_anchors, num_classes, alpha=1.0):
'''Create YOLO_v3 Lite SPP MobileNet model CNN body in keras.'''
mobilenet = MobileNet(input_tensor=inputs, weights='imagenet', include_top=False, alpha=alpha)
print('backbone layers number: {}'.format(len(mobilenet.layers)))
# input: 416 x 416 x 3
# conv_pw_13_relu :13 x 13 x (1024*alpha)
# conv_pw_11_relu :26 x 26 x (512*alpha)
# conv_pw_5_relu : 52 x 52 x (256*alpha)
# f1: 13 x 13 x (1024*alpha)
f1 = mobilenet.get_layer('conv_pw_13_relu').output
# f2: 26 x 26 x (512*alpha)
f2 = mobilenet.get_layer('conv_pw_11_relu').output
# f3: 52 x 52 x (256*alpha)
f3 = mobilenet.get_layer('conv_pw_5_relu').output
f1_channel_num = int(1024*alpha)
f2_channel_num = int(512*alpha)
f3_channel_num = int(256*alpha)
y1, y2, y3 = yolo3lite_predictions((f1, f2, f3), (f1_channel_num, f2_channel_num, f3_channel_num), num_anchors, num_classes, use_spp=True)
return Model(inputs=inputs, outputs=[y1, y2, y3])
def tiny_yolo3_mobilenet_body(inputs, num_anchors, num_classes, alpha=1.0):
'''Create Tiny YOLO_v3 MobileNet model CNN body in keras.'''
mobilenet = MobileNet(input_tensor=inputs, weights='imagenet', include_top=False, alpha=alpha)
print('backbone layers number: {}'.format(len(mobilenet.layers)))
# input: 416 x 416 x 3
# conv_pw_13_relu :13 x 13 x (1024*alpha)
# conv_pw_11_relu :26 x 26 x (512*alpha)
# conv_pw_5_relu : 52 x 52 x (256*alpha)
# f1: 13 x 13 x (1024*alpha)
f1 = mobilenet.get_layer('conv_pw_13_relu').output
# f2: 26 x 26 x (512*alpha)
f2 = mobilenet.get_layer('conv_pw_11_relu').output
f1_channel_num = int(1024*alpha)
f2_channel_num = int(512*alpha)
y1, y2 = tiny_yolo3_predictions((f1, f2), (f1_channel_num, f2_channel_num), num_anchors, num_classes)
return Model(inputs, [y1, y2])
def tiny_yolo3lite_mobilenet_body(inputs, num_anchors, num_classes, alpha=1.0):
'''Create Tiny YOLO_v3 Lite MobileNet model CNN body in keras.'''
mobilenet = MobileNet(input_tensor=inputs, weights='imagenet', include_top=False, alpha=alpha)
print('backbone layers number: {}'.format(len(mobilenet.layers)))
# input: 416 x 416 x 3
# conv_pw_13_relu :13 x 13 x (1024*alpha)
# conv_pw_11_relu :26 x 26 x (512*alpha)
# conv_pw_5_relu : 52 x 52 x (256*alpha)
# f1: 13 x 13 x (1024*alpha)
f1 = mobilenet.get_layer('conv_pw_13_relu').output
# f2: 26 x 26 x (512*alpha)
f2 = mobilenet.get_layer('conv_pw_11_relu').output
f1_channel_num = int(1024*alpha)
f2_channel_num = int(512*alpha)
y1, y2 = tiny_yolo3lite_predictions((f1, f2), (f1_channel_num, f2_channel_num), num_anchors, num_classes)
return Model(inputs, [y1, y2])
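# A minimal usage sketch, assuming a standard 416x416 RGB input and typical
# YOLOv3 settings (3 anchors per scale, 80 classes); these values are
# illustrative and not taken from this repository's configs.
if __name__ == '__main__':
    from tensorflow.keras.layers import Input

    image_input = Input(shape=(416, 416, 3))
    model = yolo3_mobilenet_body(image_input, num_anchors=3, num_classes=80)
    model.summary()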
| 41.044118
| 215
| 0.692942
| 890
| 5,582
| 4.088764
| 0.1
| 0.046167
| 0.060731
| 0.075021
| 0.85463
| 0.85463
| 0.85463
| 0.85463
| 0.85463
| 0.843913
| 0
| 0.102036
| 0.190613
| 5,582
| 135
| 216
| 41.348148
| 0.703409
| 0.289502
| 0
| 0.745455
| 0
| 0
| 0.096174
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.072727
| 0
| 0.254545
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4c19eb4b2d8b87c349184259b8ce1176b03fce07
| 47
|
py
|
Python
|
olsq/olsq_qiskit/__init__.py
|
sitaochen/OLSQ
|
59ef01ec184e05f866cabea7178c68cdda6bee3c
|
[
"BSD-3-Clause"
] | 13
|
2020-07-30T17:36:58.000Z
|
2022-03-28T07:14:25.000Z
|
olsq/olsq_qiskit/__init__.py
|
sitaochen/OLSQ
|
59ef01ec184e05f866cabea7178c68cdda6bee3c
|
[
"BSD-3-Clause"
] | 2
|
2021-01-11T02:55:02.000Z
|
2021-07-20T05:38:14.000Z
|
olsq/olsq_qiskit/__init__.py
|
sitaochen/OLSQ
|
59ef01ec184e05f866cabea7178c68cdda6bee3c
|
[
"BSD-3-Clause"
] | 7
|
2020-07-30T17:37:03.000Z
|
2021-05-31T20:51:29.000Z
|
from olsq.olsq_qiskit.solve import OLSQ_qiskit
| 23.5
| 46
| 0.87234
| 8
| 47
| 4.875
| 0.625
| 0.512821
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 47
| 1
| 47
| 47
| 0.906977
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
4c2dd6abffc1578445f8e1e5b5c4b1d6a4ccb151
| 13,545
|
py
|
Python
|
restraintmaker/io/Exporter.py
|
rinikerlab/restraintmaker
|
fb30a101044a6bd28fa973b3ffa7582351540d30
|
[
"MIT"
] | 1
|
2021-11-15T14:01:28.000Z
|
2021-11-15T14:01:28.000Z
|
restraintmaker/io/Exporter.py
|
rinikerlab/restraintmaker
|
fb30a101044a6bd28fa973b3ffa7582351540d30
|
[
"MIT"
] | 3
|
2021-10-01T18:35:59.000Z
|
2022-01-28T14:59:34.000Z
|
restraintmaker/io/Exporter.py
|
rinikerlab/restraintmaker
|
fb30a101044a6bd28fa973b3ffa7582351540d30
|
[
"MIT"
] | 3
|
2020-12-27T12:27:15.000Z
|
2021-11-01T13:45:14.000Z
|
"""
The module Exporter is providing the required code for exporting files from PyMol
"""
import typing as t
from math import sqrt
import numpy as np
from restraintmaker.io import Files
from restraintmaker.utils import Utilities as u, Types
from restraintmaker.utils.Utilities import print
class _Exporter():
"""
.. class:: _Exporter
"""
def __init__(self, restraints: t.List[Types._Restraint]):
"""
This class is the private parent class that defines the interface for its subclasses.
Parameters
----------
restraints : Types
Restraints to be saved
"""
self.restraints = restraints
def get_args(self, input_function: t.Callable[[str], t.Any]):
"""
should be overridden by every subclass of Exporter. It will assign all necessary attributes using input_function
Parameters
----------
input_function: t.Callable[[str],t.Any]
a function that will provide the arguments for the selection in the necessary format.
Returns
-------
NoReturn
Raises
------
u.BadArgumentException
"""
raise NotImplementedError("Direct call of method get_args of abstract parent class _Exporter.")
def export_restraints(self, settings: t.Dict[str, t.Any], verbose: bool = False):
"""
export_restraints must be overridden by every subclass of Exporter. Writes the restraints into a file.
Parameters
----------
verbose : bool
print progress (True) or not (False)
Returns
-------
NoReturn
"""
raise NotImplementedError("Direct call of method export_disres of abstract parent class _Exporter.")
class Gromos_Distance_Restraint_Exporter(_Exporter):
def __init__(self, restraints: t.List[Types.Distance_Restraint]):
"""
This is an exporting class for Gromos Distance Restraints
Parameters
----------
restraints : Types
Restraints to be saved
"""
for r in restraints:
if not isinstance(r, Types.Distance_Restraint):
raise TypeError('Gromos_Distance_Restraint_Exporter only accepts Distance_Restraint objects as input')
super().__init__(restraints)
# attributes to be specified in get_args:
self.out_path = None
def get_args(self, input_function: t.Callable[[str], t.Any]):
"""should be overridden by every subclass of Exporter. It will assign all necessary varaibles using input_function
For Gromos_Exporter: out_path
Parameters
----------
input_function: t.Callable[[str],t.Any]
a function that will provide the arguments for the selection in the necessary format.
Returns
-------
NoReturn
Raises
------
u.BadArgumentException
"""
# Error checking can only be done when we try to access the file
self.out_path = u.check_or_convert_argument(input_function('Name of the output File:'), str)
if self.out_path == '' or self.out_path == 'None':
raise u.BadArgumentException(
"Empty filename. (Unless you actually wanted to call your file None. \n"
"In which case you have to blame Python's promiscuous type conversion. And yourself, for not using file extensions.)")
def export_restraints(self, verbose: bool = True) -> str:
"""
export_restraints must be overridden by every subclass of Exporter. Writes the restraints into a file.
For Gromos Exporter it will be in a gromos compatible format.
todo: realise these parameters as settings dict, which the user can provide as input
Parameters
----------
verbose : bool
print progress (True) or not (False)
Returns
-------
"""
##disres_file settings
KDISH = 0.1
KDISC = 0.153
fullharm = 1
deviationdist = None
general_dist = None
# build_up clean disres file;
def build_pair_distance_restraints(self, restrain_atoms: t.List[dict], fullharm: int = 1, deviationdist=None,
general_dist=None) -> list:
# Can do it in two steps: 1)type = fullharm, deviationdist, generaldist, 2) value
# check input
if fullharm == 0:
if deviationdist is not None or general_dist is not None:
raise IOError("Please use one option, fullharm, ddist or generaldist.")
elif deviationdist is not None:
if fullharm == 0 or general_dist is not None:
raise IOError("Please use one option, fullharm, ddist or generaldist.")
elif general_dist is not None:
if deviationdist is not None or fullharm == 0:
raise IOError("Please use one option, fullharm, ddist or generaldist.")
if deviationdist is None:
deviationdist = 0.0
restraint_dict = []
# TODO: use zip to iterate over a1 and a2 directly
for r in self.restraints:
a1 = r.atoms[0]
a2 = r.atoms[1]
# TODO: Make distance an attribute of RestraintPair and set it once at creation. BUT THEN WE HAVE TO MAKE SURE ATOMS ARE NOT CHANGED AFTER THAT
distance_A = sqrt((float(a1.x) - float(a2.x)) ** 2 + (float(a1.y) - float(a2.y)) ** 2 + (
float(a1.z) - float(a2.z)) ** 2)
distance_nm = round(distance_A, 2) / 10
comment = "##\t" + a1.resn + "/" + a1.name + " " + str(
a1.id) + " - " + a2.resn + "/" + a2.name + " " + str(a2.id) + "\n"
new_entry = Files.Gromos_blocks.atom_pair_distanceRes(i1=a1.id, j1=0, k1=0, l1=0, type1=0, i2=a2.id,
j2=0, k2=0, l2=0, type2=0, r0=np.floor(distance_nm*100)/100, w0=1.0,
rah=fullharm,
comment=comment)
# print(new_entry)
restraint_dict.append(new_entry)
return restraint_dict
disres_out = build_pair_distance_restraints(self, restrain_atoms=self.restraints, fullharm=fullharm,
deviationdist=deviationdist)
##WRITE out disres.dat
print("generate out_dict", mv=0)
disres_out_dict = {"KDISH": KDISH, "KDISC": KDISC,
"RESTRAINTHEADER": "i j k l type i j k l type r0 w0 rah".split(),
# header
"RESTRAINTS": disres_out}
print("generate top_disres obj", mv=0)
disres_file = Files.Gromos_files.disres()
print("top_disres obj add:", mv=0)
disres_file.add_block(blocktitle="TITLE", content="generated disres file with restraintmaker\n",
verbose=True)
disres_file.add_block(blocktitle="DISTANCERESSPEC", content=disres_out_dict, verbose=verbose)
disres_file.write(self.out_path)
print("wrote to: " + self.out_path, mv=4)
return str(disres_file)
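# A small self-contained sketch (not part of the original module) of the
# distance math used in build_pair_distance_restraints above: atom
# coordinates are in Angstrom, and the restraint value r0 is written in nm,
# rounded to two decimals and then floored to two decimals. The atom
# arguments are stand-ins for the atom objects carried by the restraints.
def _pair_distance_nm_sketch(a1, a2) -> float:
    """Euclidean distance between two atoms (x/y/z in Angstrom), in nm."""
    d_angstrom = sqrt((float(a1.x) - float(a2.x)) ** 2
                      + (float(a1.y) - float(a2.y)) ** 2
                      + (float(a1.z) - float(a2.z)) ** 2)
    d_nm = round(d_angstrom, 2) / 10
    return np.floor(d_nm * 100) / 100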
class Gromacs_Distance_Restraint_Exporter(_Exporter):
def __init__(self, restraints: t.List[Types.Distance_Restraint]):
"""
This is an exporting class for Gromacs Distance Restraints
Parameters
----------
restraints : Types
Restraints to be saved
"""
for r in restraints:
if not isinstance(r, Types.Distance_Restraint):
raise TypeError('Gromacs_Distance_Restraint_Exporter only accepts Distance_Restraint objects as input')
super().__init__(restraints)
# attributes to be specified in get_args:
self.out_path = None
def get_args(self, input_function: t.Callable[[str], t.Any]):
"""should be overridden by every subclass of Exporter. It will assign all necessary varaibles using input_function
For Gromacs_Exporter: out_path
Parameters
----------
input_function: t.Callable[[str],t.Any]
a function that will provide the arguments for the selection in the necessary format.
Returns
-------
NoReturn
Raises
------
u.BadArgumentException
"""
# Error checking can only be done when we try to access the file
self.out_path = u.check_or_convert_argument(input_function('Name of the output File:'), str)
if self.out_path == '' or self.out_path == 'None':
raise u.BadArgumentException(
"Empty filename. (Unless you actually wanted to call your file None. \n"
"In which case you have to blame Python's promiscuous type conversion. And yourself, for not using file extensions.)")
def export_restraints(self, verbose: bool = True) -> str:
"""
export_restraints must be overridden by every subclass of Exporter. Writes the restraints into a file.
Note: this class is currently identical to the Gromos exporter and writes a gromos-compatible format.
todo: realise these parameters as settings dict, which the user can provide as input
Parameters
----------
verbose : bool
print progress (True) or not (False)
Returns
-------
"""
##disres_file settings
KDISH = 0.1
KDISC = 0.153
fullharm = 1
deviationdist = None
general_dist = None
# build_up clean disres file;
def build_pair_distance_restraints(self, restrain_atoms: t.List[dict], fullharm: int = 1, deviationdist=None,
general_dist=None) -> list:
# Can do it in two steps: 1)type = fullharm, deviationdist, generaldist, 2) value
# check input
if fullharm == 0:
if deviationdist is not None or general_dist is not None:
raise IOError("Please use one option, fullharm, ddist or generaldist.")
elif deviationdist is not None:
if fullharm == 0 or general_dist is not None:
raise IOError("Please use one option, fullharm, ddist or generaldist.")
elif general_dist is not None:
if deviationdist is not None or fullharm == 0:
raise IOError("Please use one option, fullharm, ddist or generaldist.")
if deviationdist is None:
deviationdist = 0.0
restraint_dict = []
# TODO: use zip to iterate over a1 and a2 directly
for r in self.restraints:
a1 = r.atoms[0]
a2 = r.atoms[1]
# TODO: Make distance an attribute of RestraintPair and set it once at creation. BUT THEN WE HAVE TO MAKE SURE ATOMS ARE NOT CHANGED AFTER THAT
distance_A = sqrt((float(a1.x) - float(a2.x)) ** 2 + (float(a1.y) - float(a2.y)) ** 2 + (
float(a1.z) - float(a2.z)) ** 2)
distance_nm = round(distance_A, 2) / 10
comment = "##\t" + a1.resn + "/" + a1.name + " " + str(
a1.id) + " - " + a2.resn + "/" + a2.name + " " + str(a2.id) + "\n"
new_entry = Files.Gromos_blocks.atom_pair_distanceRes(i1=a1.id, j1=0, k1=0, l1=0, type1=0, i2=a2.id,
j2=0, k2=0, l2=0, type2=0, r0=np.floor(distance_nm*100)/100, w0=1.0,
rah=fullharm,
comment=comment)
# print(new_entry)
restraint_dict.append(new_entry)
return restraint_dict
disres_out = build_pair_distance_restraints(self, restrain_atoms=self.restraints, fullharm=fullharm,
deviationdist=deviationdist)
##WRITE out disres.dat
print("generate out_dict", mv=0)
disres_out_dict = {"KDISH": KDISH, "KDISC": KDISC,
"RESTRAINTHEADER": "i j k l type i j k l type r0 w0 rah".split(),
# header
"RESTRAINTS": disres_out}
print("generate top_disres obj", mv=0)
disres_file = Files.Gromos_files.disres()
print("top_disres obj add:", mv=0)
disres_file.add_block(blocktitle="TITLE", content="generated disres file with restraintmaker\n",
verbose=True)
disres_file.add_block(blocktitle="DISTANCERESSPEC", content=disres_out_dict, verbose=verbose)
disres_file.write(self.out_path)
print("wrote to: " + self.out_path, mv=4)
return str(disres_file)
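# A minimal usage sketch (hedged): get_args accepts any callable that maps a
# prompt string to a value, so the built-in input() satisfies its contract.
# The empty restraint list below is a placeholder; a real run would pass a
# prepared list of Types.Distance_Restraint objects.
if __name__ == '__main__':
    exporter = Gromos_Distance_Restraint_Exporter([])
    exporter.get_args(input)
    exporter.export_restraints(verbose=True)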
| 40.798193
| 160
| 0.545367
| 1,506
| 13,545
| 4.786189
| 0.168659
| 0.022198
| 0.018313
| 0.018313
| 0.926609
| 0.918563
| 0.906909
| 0.900805
| 0.895394
| 0.895394
| 0
| 0.017422
| 0.364341
| 13,545
| 331
| 161
| 40.92145
| 0.819744
| 0.256109
| 0
| 0.884892
| 0
| 0.014388
| 0.166927
| 0.006807
| 0
| 0
| 0
| 0.012085
| 0
| 1
| 0.079137
| false
| 0
| 0.043165
| 0
| 0.172662
| 0.064748
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4c39ba1b66963418793325ee1ee196fc35a29c39
| 7,502
|
py
|
Python
|
test_main.py
|
uniqueinx/swenson
|
6a70ec1b077aa6f0506a2bb1cadb45422ed85094
|
[
"MIT"
] | null | null | null |
test_main.py
|
uniqueinx/swenson
|
6a70ec1b077aa6f0506a2bb1cadb45422ed85094
|
[
"MIT"
] | null | null | null |
test_main.py
|
uniqueinx/swenson
|
6a70ec1b077aa6f0506a2bb1cadb45422ed85094
|
[
"MIT"
] | null | null | null |
from fastapi.testclient import TestClient
from main import app
client = TestClient(app)
def test_health_check():
response = client.get("/health_check")
assert response.status_code == 200
assert response.json() == {"status": "healthy"}
def test_all_large_machines():
response = client.get("/coffee_machine?product_type=COFFEE_MACHINE_LARGE")
assert response.status_code == 200
assert response.json() == [
{
"id": "CM101",
"product_type": "COFFEE_MACHINE_LARGE",
"water_line_compatible": False,
"model": "BASE_MODEL",
},
{
"id": "CM102",
"product_type": "COFFEE_MACHINE_LARGE",
"water_line_compatible": True,
"model": "PREMIUM_MODEL",
},
{
"id": "CM103",
"product_type": "COFFEE_MACHINE_LARGE",
"water_line_compatible": True,
"model": "DELUX_MODEL",
},
]
def test_all_large_pods():
response = client.get("/coffee_pod?product_type=COFFEE_POD_LARGE")
assert response.status_code == 200
assert response.json() == [
{
"id": "CP101",
"product_type": "COFFEE_POD_LARGE",
"coffee_flavor": "COFFEE_FLAVOR_VANILLA",
"pack_size": "12",
},
{
"id": "CP103",
"product_type": "COFFEE_POD_LARGE",
"coffee_flavor": "COFFEE_FLAVOR_VANILLA",
"pack_size": "36",
},
{
"id": "CP111",
"product_type": "COFFEE_POD_LARGE",
"coffee_flavor": "COFFEE_FLAVOR_CARAMEL",
"pack_size": "12",
},
{
"id": "CP113",
"product_type": "COFFEE_POD_LARGE",
"coffee_flavor": "COFFEE_FLAVOR_CARAMEL",
"pack_size": "36",
},
{
"id": "CP121",
"product_type": "COFFEE_POD_LARGE",
"coffee_flavor": "COFFEE_FLAVOR_PSL",
"pack_size": "12",
},
{
"id": "CP123",
"product_type": "COFFEE_POD_LARGE",
"coffee_flavor": "COFFEE_FLAVOR_PSL",
"pack_size": "36",
},
{
"id": "CP131",
"product_type": "COFFEE_POD_LARGE",
"coffee_flavor": "COFFEE_FLAVOR_MOCHA",
"pack_size": "12",
},
{
"id": "CP133",
"product_type": "COFFEE_POD_LARGE",
"coffee_flavor": "COFFEE_FLAVOR_MOCHA",
"pack_size": "36",
},
{
"id": "CP141",
"product_type": "COFFEE_POD_LARGE",
"coffee_flavor": "COFFEE_FLAVOR_HAZELNUT",
"pack_size": "12",
},
{
"id": "CP143",
"product_type": "COFFEE_POD_LARGE",
"coffee_flavor": "COFFEE_FLAVOR_HAZELNUT",
"pack_size": "36",
},
]
def test_all_espresso_vanilla_pods():
response = client.get(
"/coffee_pod?product_type=ESPRESSO_POD&coffee_flavor=COFFEE_FLAVOR_VANILLA"
)
assert response.status_code == 200
assert response.json() == [
{
"id": "EP003",
"product_type": "ESPRESSO_POD",
"coffee_flavor": "COFFEE_FLAVOR_VANILLA",
"pack_size": "36",
},
{
"id": "EP005",
"product_type": "ESPRESSO_POD",
"coffee_flavor": "COFFEE_FLAVOR_VANILLA",
"pack_size": "60",
},
{
"id": "EP007",
"product_type": "ESPRESSO_POD",
"coffee_flavor": "COFFEE_FLAVOR_VANILLA",
"pack_size": "84",
},
]
def test_all_espresso_pods():
    response = client.get("/coffee_pod?product_type=ESPRESSO_POD")
assert response.status_code == 200
assert response.json() == [
{
"id": "EP003",
"product_type": "ESPRESSO_POD",
"coffee_flavor": "COFFEE_FLAVOR_VANILLA",
"pack_size": "36",
},
{
"id": "EP005",
"product_type": "ESPRESSO_POD",
"coffee_flavor": "COFFEE_FLAVOR_VANILLA",
"pack_size": "60",
},
{
"id": "EP007",
"product_type": "ESPRESSO_POD",
"coffee_flavor": "COFFEE_FLAVOR_VANILLA",
"pack_size": "84",
},
{
"id": "EP013",
"product_type": "ESPRESSO_POD",
"coffee_flavor": "COFFEE_FLAVOR_CARAMEL",
"pack_size": "36",
},
{
"id": "EP015",
"product_type": "ESPRESSO_POD",
"coffee_flavor": "COFFEE_FLAVOR_CARAMEL",
"pack_size": "60",
},
{
"id": "EP017",
"product_type": "ESPRESSO_POD",
"coffee_flavor": "COFFEE_FLAVOR_CARAMEL",
"pack_size": "84",
},
]
def test_all_small_pods():
response = client.get("/coffee_pod?product_type=COFFEE_POD_SMALL")
assert response.status_code == 200
assert response.json() == [
{
"id": "CP001",
"product_type": "COFFEE_POD_SMALL",
"coffee_flavor": "COFFEE_FLAVOR_VANILLA",
"pack_size": "12",
},
{
"id": "CP003",
"product_type": "COFFEE_POD_SMALL",
"coffee_flavor": "COFFEE_FLAVOR_VANILLA",
"pack_size": "36",
},
{
"id": "CP011",
"product_type": "COFFEE_POD_SMALL",
"coffee_flavor": "COFFEE_FLAVOR_CARAMEL",
"pack_size": "12",
},
{
"id": "CP013",
"product_type": "COFFEE_POD_SMALL",
"coffee_flavor": "COFFEE_FLAVOR_CARAMEL",
"pack_size": "36",
},
{
"id": "CP021",
"product_type": "COFFEE_POD_SMALL",
"coffee_flavor": "COFFEE_FLAVOR_PSL",
"pack_size": "12",
},
{
"id": "CP023",
"product_type": "COFFEE_POD_SMALL",
"coffee_flavor": "COFFEE_FLAVOR_PSL",
"pack_size": "36",
},
{
"id": "CP031",
"product_type": "COFFEE_POD_SMALL",
"coffee_flavor": "COFFEE_FLAVOR_MOCHA",
"pack_size": "12",
},
{
"id": "CP033",
"product_type": "COFFEE_POD_SMALL",
"coffee_flavor": "COFFEE_FLAVOR_MOCHA",
"pack_size": "36",
},
{
"id": "CP041",
"product_type": "COFFEE_POD_SMALL",
"coffee_flavor": "COFFEE_FLAVOR_HAZELNUT",
"pack_size": "12",
},
{
"id": "CP043",
"product_type": "COFFEE_POD_SMALL",
"coffee_flavor": "COFFEE_FLAVOR_HAZELNUT",
"pack_size": "36",
},
]
def test_all_pods_sold_in_7_dozen_packs():
response = client.get("/coffee_pod?pack_size=84")
assert response.status_code == 200
assert response.json() == [
{
"id": "EP007",
"product_type": "ESPRESSO_POD",
"coffee_flavor": "COFFEE_FLAVOR_VANILLA",
"pack_size": "84",
},
{
"id": "EP017",
"product_type": "ESPRESSO_POD",
"coffee_flavor": "COFFEE_FLAVOR_CARAMEL",
"pack_size": "84",
},
]
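For context, here is a hedged sketch of the kind of main.py these tests could run against; the real application is not included in this dump, and the PRODUCTS catalog below is an illustrative subset of the IDs asserted above, not the actual data:

from typing import Optional

from fastapi import FastAPI

app = FastAPI()

# Illustrative subset of the catalog; the tests above assert the full data set.
PRODUCTS = [
    {"id": "CM101", "product_type": "COFFEE_MACHINE_LARGE",
     "water_line_compatible": False, "model": "BASE_MODEL"},
    {"id": "CP101", "product_type": "COFFEE_POD_LARGE",
     "coffee_flavor": "COFFEE_FLAVOR_VANILLA", "pack_size": "12"},
    {"id": "EP007", "product_type": "ESPRESSO_POD",
     "coffee_flavor": "COFFEE_FLAVOR_VANILLA", "pack_size": "84"},
]

@app.get("/health_check")
def health_check():
    return {"status": "healthy"}

@app.get("/coffee_machine")
def coffee_machine(product_type: Optional[str] = None):
    # Machines are the catalog entries that carry a "model" field.
    machines = [p for p in PRODUCTS if "model" in p]
    if product_type is not None:
        machines = [p for p in machines if p["product_type"] == product_type]
    return machines

@app.get("/coffee_pod")
def coffee_pod(product_type: Optional[str] = None,
               coffee_flavor: Optional[str] = None,
               pack_size: Optional[str] = None):
    # Pods are the catalog entries that carry a "pack_size" field; filters are ANDed.
    pods = [p for p in PRODUCTS if "pack_size" in p]
    if product_type is not None:
        pods = [p for p in pods if p["product_type"] == product_type]
    if coffee_flavor is not None:
        pods = [p for p in pods if p["coffee_flavor"] == coffee_flavor]
    if pack_size is not None:
        pods = [p for p in pods if p["pack_size"] == pack_size]
    return pods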
| 28.743295
| 83
| 0.48707
| 674
| 7,502
| 5.005935
| 0.130564
| 0.227623
| 0.170717
| 0.227623
| 0.868998
| 0.855957
| 0.839063
| 0.839063
| 0.773563
| 0.694132
| 0
| 0.039713
| 0.368968
| 7,502
| 260
| 84
| 28.853846
| 0.673004
| 0
| 0
| 0.491803
| 0
| 0
| 0.391895
| 0.109171
| 0
| 0
| 0
| 0
| 0.057377
| 1
| 0.028689
| false
| 0
| 0.008197
| 0
| 0.036885
| 0.004098
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4c4a352dae151c06df6bbcc2a04779a0c2d7b17e
| 68,030
|
py
|
Python
|
hubspot/files/files/api/files_api.py
|
fakepop/hubspot-api-python
|
f04103a09f93f5c26c99991b25fa76801074f3d3
|
[
"Apache-2.0"
] | null | null | null |
hubspot/files/files/api/files_api.py
|
fakepop/hubspot-api-python
|
f04103a09f93f5c26c99991b25fa76801074f3d3
|
[
"Apache-2.0"
] | null | null | null |
hubspot/files/files/api/files_api.py
|
fakepop/hubspot-api-python
|
f04103a09f93f5c26c99991b25fa76801074f3d3
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Files
Upload and manage files. # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from hubspot.files.files.api_client import ApiClient
from hubspot.files.files.exceptions import ApiTypeError, ApiValueError # noqa: F401
class FilesApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def archive(self, file_id, **kwargs): # noqa: E501
"""Delete file # noqa: E501
Delete file by ID # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.archive(file_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str file_id: File ID to delete (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.archive_with_http_info(file_id, **kwargs) # noqa: E501
def archive_with_http_info(self, file_id, **kwargs): # noqa: E501
"""Delete file # noqa: E501
Delete file by ID # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.archive_with_http_info(file_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str file_id: File ID to delete (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["file_id"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'" " to method archive" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'file_id' is set
if self.api_client.client_side_validation and (
"file_id" not in local_var_params
or local_var_params["file_id"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `file_id` when calling `archive`"
) # noqa: E501
if (
self.api_client.client_side_validation
and "file_id" in local_var_params
and not re.search(r"\d+", local_var_params["file_id"])
): # noqa: E501
raise ApiValueError(
"Invalid value for parameter `file_id` when calling `archive`, must conform to the pattern `/\d+/`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "file_id" in local_var_params:
path_params["fileId"] = local_var_params["file_id"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["*/*"]
) # noqa: E501
# Authentication setting
auth_settings = ["hapikey", "oauth2"] # noqa: E501
return self.api_client.call_api(
"/files/v3/files/{fileId}",
"DELETE",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def archive_gdpr(self, file_id, **kwargs): # noqa: E501
"""GDPR delete # noqa: E501
        GDPR delete file # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.archive_gdpr(file_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str file_id: ID of file to GDPR delete (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.archive_gdpr_with_http_info(file_id, **kwargs) # noqa: E501
def archive_gdpr_with_http_info(self, file_id, **kwargs): # noqa: E501
"""GDPR delete # noqa: E501
        GDPR delete file # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.archive_gdpr_with_http_info(file_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str file_id: ID of file to GDPR delete (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["file_id"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method archive_gdpr" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'file_id' is set
if self.api_client.client_side_validation and (
"file_id" not in local_var_params
or local_var_params["file_id"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `file_id` when calling `archive_gdpr`"
) # noqa: E501
if (
self.api_client.client_side_validation
and "file_id" in local_var_params
and not re.search(r"\d+", local_var_params["file_id"])
): # noqa: E501
raise ApiValueError(
"Invalid value for parameter `file_id` when calling `archive_gdpr`, must conform to the pattern `/\d+/`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "file_id" in local_var_params:
path_params["fileId"] = local_var_params["file_id"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["*/*"]
) # noqa: E501
# Authentication setting
auth_settings = ["hapikey", "oauth2"] # noqa: E501
return self.api_client.call_api(
"/files/v3/files/{fileId}/gdpr-delete",
"DELETE",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def check_import(self, task_id, **kwargs): # noqa: E501
"""Check import status. # noqa: E501
Check the status of requested import. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.check_import(task_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str task_id: Import by URL task ID (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: FileActionResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.check_import_with_http_info(task_id, **kwargs) # noqa: E501
def check_import_with_http_info(self, task_id, **kwargs): # noqa: E501
"""Check import status. # noqa: E501
Check the status of requested import. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.check_import_with_http_info(task_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str task_id: Import by URL task ID (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(FileActionResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["task_id"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method check_import" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'task_id' is set
if self.api_client.client_side_validation and (
"task_id" not in local_var_params
or local_var_params["task_id"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `task_id` when calling `check_import`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "task_id" in local_var_params:
path_params["taskId"] = local_var_params["task_id"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json", "*/*"]
) # noqa: E501
# Authentication setting
auth_settings = ["hapikey", "oauth2"] # noqa: E501
return self.api_client.call_api(
"/files/v3/files/import-from-url/async/tasks/{taskId}/status",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="FileActionResponse", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def do_search(self, **kwargs): # noqa: E501
"""Search files # noqa: E501
Search through files in the file manager. Does not display hidden or archived files. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.do_search(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param list[str] properties: Desired file properties in the return object.
:param str after: The maximum offset of items for a given search is 10000. Narrow your search down if you are reaching this limit.
:param str before:
:param int limit: Number of items to return. Maximum limit is 100.
:param list[str] sort: Sort files by a given field.
:param str id: Search files by given ID.
:param datetime created_at: Search files by time of creation.
        :param datetime updated_at: Search files by time of latest update.
:param str name: Search for files containing the given name.
:param str path: Search files by path.
:param int parent_folder_id: Search files within given folder ID.
:param int size: Query by file size.
:param int height: Search files by height of image or video.
:param int width: Search files by width of image or video.
:param str encoding: Search files with specified encoding.
:param str type: Filter by provided file type.
:param str extension: Search files by given extension.
:param str url: Search for given URL
        :param bool is_usable_in_content: If true, shows files that have been marked to be used in new content. If false, shows files that should not be used in new content.
:param bool allows_anonymous_access: If 'true' will show private files; if 'false' will show public files
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: CollectionResponseFile
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.do_search_with_http_info(**kwargs) # noqa: E501
def do_search_with_http_info(self, **kwargs): # noqa: E501
"""Search files # noqa: E501
Search through files in the file manager. Does not display hidden or archived files. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.do_search_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param list[str] properties: Desired file properties in the return object.
:param str after: The maximum offset of items for a given search is 10000. Narrow your search down if you are reaching this limit.
:param str before:
:param int limit: Number of items to return. Maximum limit is 100.
:param list[str] sort: Sort files by a given field.
:param str id: Search files by given ID.
:param datetime created_at: Search files by time of creation.
        :param datetime updated_at: Search files by time of latest update.
:param str name: Search for files containing the given name.
:param str path: Search files by path.
:param int parent_folder_id: Search files within given folder ID.
:param int size: Query by file size.
:param int height: Search files by height of image or video.
:param int width: Search files by width of image or video.
:param str encoding: Search files with specified encoding.
:param str type: Filter by provided file type.
:param str extension: Search files by given extension.
:param str url: Search for given URL
        :param bool is_usable_in_content: If true, shows files that have been marked to be used in new content. If false, shows files that should not be used in new content.
:param bool allows_anonymous_access: If 'true' will show private files; if 'false' will show public files
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(CollectionResponseFile, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
"properties",
"after",
"before",
"limit",
"sort",
"id",
"created_at",
"updated_at",
"name",
"path",
"parent_folder_id",
"size",
"height",
"width",
"encoding",
"type",
"extension",
"url",
"is_usable_in_content",
"allows_anonymous_access",
]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method do_search" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
collection_formats = {}
path_params = {}
query_params = []
if (
"properties" in local_var_params
and local_var_params["properties"] is not None
): # noqa: E501
query_params.append(
("properties", local_var_params["properties"])
) # noqa: E501
collection_formats["properties"] = "multi" # noqa: E501
if (
"after" in local_var_params and local_var_params["after"] is not None
): # noqa: E501
query_params.append(("after", local_var_params["after"])) # noqa: E501
if (
"before" in local_var_params and local_var_params["before"] is not None
): # noqa: E501
query_params.append(("before", local_var_params["before"])) # noqa: E501
if (
"limit" in local_var_params and local_var_params["limit"] is not None
): # noqa: E501
query_params.append(("limit", local_var_params["limit"])) # noqa: E501
if (
"sort" in local_var_params and local_var_params["sort"] is not None
): # noqa: E501
query_params.append(("sort", local_var_params["sort"])) # noqa: E501
collection_formats["sort"] = "multi" # noqa: E501
if (
"id" in local_var_params and local_var_params["id"] is not None
): # noqa: E501
query_params.append(("id", local_var_params["id"])) # noqa: E501
if (
"created_at" in local_var_params
and local_var_params["created_at"] is not None
): # noqa: E501
query_params.append(
("createdAt", local_var_params["created_at"])
) # noqa: E501
if (
"updated_at" in local_var_params
and local_var_params["updated_at"] is not None
): # noqa: E501
query_params.append(
("updatedAt", local_var_params["updated_at"])
) # noqa: E501
if (
"name" in local_var_params and local_var_params["name"] is not None
): # noqa: E501
query_params.append(("name", local_var_params["name"])) # noqa: E501
if (
"path" in local_var_params and local_var_params["path"] is not None
): # noqa: E501
query_params.append(("path", local_var_params["path"])) # noqa: E501
if (
"parent_folder_id" in local_var_params
and local_var_params["parent_folder_id"] is not None
): # noqa: E501
query_params.append(
("parentFolderId", local_var_params["parent_folder_id"])
) # noqa: E501
if (
"size" in local_var_params and local_var_params["size"] is not None
): # noqa: E501
query_params.append(("size", local_var_params["size"])) # noqa: E501
if (
"height" in local_var_params and local_var_params["height"] is not None
): # noqa: E501
query_params.append(("height", local_var_params["height"])) # noqa: E501
if (
"width" in local_var_params and local_var_params["width"] is not None
): # noqa: E501
query_params.append(("width", local_var_params["width"])) # noqa: E501
if (
"encoding" in local_var_params and local_var_params["encoding"] is not None
): # noqa: E501
query_params.append(
("encoding", local_var_params["encoding"])
) # noqa: E501
if (
"type" in local_var_params and local_var_params["type"] is not None
): # noqa: E501
query_params.append(("type", local_var_params["type"])) # noqa: E501
if (
"extension" in local_var_params
and local_var_params["extension"] is not None
): # noqa: E501
query_params.append(
("extension", local_var_params["extension"])
) # noqa: E501
if (
"url" in local_var_params and local_var_params["url"] is not None
): # noqa: E501
query_params.append(("url", local_var_params["url"])) # noqa: E501
if (
"is_usable_in_content" in local_var_params
and local_var_params["is_usable_in_content"] is not None
): # noqa: E501
query_params.append(
("isUsableInContent", local_var_params["is_usable_in_content"])
) # noqa: E501
if (
"allows_anonymous_access" in local_var_params
and local_var_params["allows_anonymous_access"] is not None
): # noqa: E501
query_params.append(
("allowsAnonymousAccess", local_var_params["allows_anonymous_access"])
) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json", "*/*"]
) # noqa: E501
# Authentication setting
auth_settings = ["hapikey", "oauth2"] # noqa: E501
return self.api_client.call_api(
"/files/v3/files/search",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="CollectionResponseFile", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def get_by_id(self, file_id, **kwargs): # noqa: E501
"""Get file. # noqa: E501
Get file by ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_by_id(file_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str file_id: Id of the desired file. (required)
:param list[str] properties:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: File
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.get_by_id_with_http_info(file_id, **kwargs) # noqa: E501
def get_by_id_with_http_info(self, file_id, **kwargs): # noqa: E501
"""Get file. # noqa: E501
Get file by ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_by_id_with_http_info(file_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str file_id: Id of the desired file. (required)
:param list[str] properties:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(File, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["file_id", "properties"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_by_id" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'file_id' is set
if self.api_client.client_side_validation and (
"file_id" not in local_var_params
or local_var_params["file_id"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `file_id` when calling `get_by_id`"
) # noqa: E501
if (
self.api_client.client_side_validation
and "file_id" in local_var_params
and not re.search(r"\d+", local_var_params["file_id"])
): # noqa: E501
raise ApiValueError(
"Invalid value for parameter `file_id` when calling `get_by_id`, must conform to the pattern `/\d+/`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "file_id" in local_var_params:
path_params["fileId"] = local_var_params["file_id"] # noqa: E501
query_params = []
if (
"properties" in local_var_params
and local_var_params["properties"] is not None
): # noqa: E501
query_params.append(
("properties", local_var_params["properties"])
) # noqa: E501
collection_formats["properties"] = "multi" # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json", "*/*"]
) # noqa: E501
# Authentication setting
auth_settings = ["hapikey", "oauth2"] # noqa: E501
return self.api_client.call_api(
"/files/v3/files/{fileId}",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="File", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def get_signed_url(self, file_id, **kwargs): # noqa: E501
"""Get signed URL to access private file. # noqa: E501
Generates signed URL that allows temporary access to a private file. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_signed_url(file_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str file_id: ID of file. (required)
:param str size: For image files. This will resize the image to the desired size before sharing. Does not affect the original file, just the file served by this signed URL.
:param int expiration_seconds: How long in seconds the link will provide access to the file.
:param bool upscale: If size is provided, this will upscale the image to fit the size dimensions.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: SignedUrl
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.get_signed_url_with_http_info(file_id, **kwargs) # noqa: E501
def get_signed_url_with_http_info(self, file_id, **kwargs): # noqa: E501
"""Get signed URL to access private file. # noqa: E501
Generates signed URL that allows temporary access to a private file. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_signed_url_with_http_info(file_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str file_id: ID of file. (required)
:param str size: For image files. This will resize the image to the desired size before sharing. Does not affect the original file, just the file served by this signed URL.
:param int expiration_seconds: How long in seconds the link will provide access to the file.
:param bool upscale: If size is provided, this will upscale the image to fit the size dimensions.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(SignedUrl, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["file_id", "size", "expiration_seconds", "upscale"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_signed_url" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'file_id' is set
if self.api_client.client_side_validation and (
"file_id" not in local_var_params
or local_var_params["file_id"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `file_id` when calling `get_signed_url`"
) # noqa: E501
if (
self.api_client.client_side_validation
and "file_id" in local_var_params
and not re.search(r"\d+", local_var_params["file_id"])
): # noqa: E501
raise ApiValueError(
"Invalid value for parameter `file_id` when calling `get_signed_url`, must conform to the pattern `/\d+/`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "file_id" in local_var_params:
path_params["fileId"] = local_var_params["file_id"] # noqa: E501
query_params = []
if (
"size" in local_var_params and local_var_params["size"] is not None
): # noqa: E501
query_params.append(("size", local_var_params["size"])) # noqa: E501
if (
"expiration_seconds" in local_var_params
and local_var_params["expiration_seconds"] is not None
): # noqa: E501
query_params.append(
("expirationSeconds", local_var_params["expiration_seconds"])
) # noqa: E501
if (
"upscale" in local_var_params and local_var_params["upscale"] is not None
): # noqa: E501
query_params.append(("upscale", local_var_params["upscale"])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json", "*/*"]
) # noqa: E501
# Authentication setting
auth_settings = ["hapikey", "oauth2"] # noqa: E501
return self.api_client.call_api(
"/files/v3/files/{fileId}/signed-url",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="SignedUrl", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def import_from_url(self, import_from_url_input, **kwargs): # noqa: E501
"""Import a file from a URL into the file manager. # noqa: E501
Asynchronously imports the file at the given URL into the file manager. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.import_from_url(import_from_url_input, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param ImportFromUrlInput import_from_url_input: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ImportFromUrlTaskLocator
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.import_from_url_with_http_info(
import_from_url_input, **kwargs
) # noqa: E501
def import_from_url_with_http_info(
self, import_from_url_input, **kwargs
): # noqa: E501
"""Import a file from a URL into the file manager. # noqa: E501
Asynchronously imports the file at the given URL into the file manager. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.import_from_url_with_http_info(import_from_url_input, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param ImportFromUrlInput import_from_url_input: (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ImportFromUrlTaskLocator, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["import_from_url_input"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method import_from_url" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'import_from_url_input' is set
if self.api_client.client_side_validation and (
"import_from_url_input" not in local_var_params
or local_var_params["import_from_url_input"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `import_from_url_input` when calling `import_from_url`"
) # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if "import_from_url_input" in local_var_params:
body_params = local_var_params["import_from_url_input"]
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json", "*/*"]
) # noqa: E501
# HTTP header `Content-Type`
header_params[
"Content-Type"
] = self.api_client.select_header_content_type( # noqa: E501
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["hapikey", "oauth2"] # noqa: E501
return self.api_client.call_api(
"/files/v3/files/import-from-url/async",
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="ImportFromUrlTaskLocator", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def replace(self, file_id, **kwargs): # noqa: E501
"""Replace file. # noqa: E501
Replace existing file data with new file data. Can be used to change image content without having to upload a new file and update all references. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace(file_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str file_id: Id of the desired file. (required)
:param str user_agent:
:param str file: File data that will replace existing file in the file manager.
:param str charset_hunch: Character set of given file data.
:param str options: JSON String representing FileReplaceOptions
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: File
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.replace_with_http_info(file_id, **kwargs) # noqa: E501
def replace_with_http_info(self, file_id, **kwargs): # noqa: E501
"""Replace file. # noqa: E501
Replace existing file data with new file data. Can be used to change image content without having to upload a new file and update all references. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_with_http_info(file_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str file_id: Id of the desired file. (required)
:param str user_agent:
:param str file: File data that will replace existing file in the file manager.
:param str charset_hunch: Character set of given file data.
:param str options: JSON String representing FileReplaceOptions
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(File, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["file_id", "user_agent", "file", "charset_hunch", "options"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'" " to method replace" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'file_id' is set
if self.api_client.client_side_validation and (
"file_id" not in local_var_params
or local_var_params["file_id"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `file_id` when calling `replace`"
) # noqa: E501
if (
self.api_client.client_side_validation
and "file_id" in local_var_params
and not re.search(r"\d+", local_var_params["file_id"])
): # noqa: E501
raise ApiValueError(
"Invalid value for parameter `file_id` when calling `replace`, must conform to the pattern `/\d+/`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "file_id" in local_var_params:
path_params["fileId"] = local_var_params["file_id"] # noqa: E501
query_params = []
header_params = {}
if "user_agent" in local_var_params:
header_params["User-Agent"] = local_var_params["user_agent"] # noqa: E501
form_params = []
local_var_files = {}
if "file" in local_var_params:
form_params.append(("file", local_var_params["file"])) # noqa: E501
if "charset_hunch" in local_var_params:
form_params.append(
("charsetHunch", local_var_params["charset_hunch"])
) # noqa: E501
if "options" in local_var_params:
form_params.append(("options", local_var_params["options"])) # noqa: E501
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json", "*/*"]
) # noqa: E501
# HTTP header `Content-Type`
header_params[
"Content-Type"
] = self.api_client.select_header_content_type( # noqa: E501
["multipart/form-data"]
) # noqa: E501
# Authentication setting
auth_settings = ["hapikey", "oauth2"] # noqa: E501
return self.api_client.call_api(
"/files/v3/files/{fileId}",
"PUT",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="File", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def update_properties(self, file_id, file_update_input, **kwargs): # noqa: E501
"""update file properties # noqa: E501
Update properties of file by ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_properties(file_id, file_update_input, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str file_id: ID of file to update (required)
:param FileUpdateInput file_update_input: Options to update. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: File
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.update_properties_with_http_info(
file_id, file_update_input, **kwargs
) # noqa: E501
def update_properties_with_http_info(
self, file_id, file_update_input, **kwargs
): # noqa: E501
"""update file properties # noqa: E501
Update properties of file by ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_properties_with_http_info(file_id, file_update_input, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str file_id: ID of file to update (required)
:param FileUpdateInput file_update_input: Options to update. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(File, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["file_id", "file_update_input"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method update_properties" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'file_id' is set
if self.api_client.client_side_validation and (
"file_id" not in local_var_params
or local_var_params["file_id"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `file_id` when calling `update_properties`"
) # noqa: E501
# verify the required parameter 'file_update_input' is set
if self.api_client.client_side_validation and (
"file_update_input" not in local_var_params
or local_var_params["file_update_input"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `file_update_input` when calling `update_properties`"
) # noqa: E501
if (
self.api_client.client_side_validation
and "file_id" in local_var_params
and not re.search(r"\d+", local_var_params["file_id"])
): # noqa: E501
raise ApiValueError(
"Invalid value for parameter `file_id` when calling `update_properties`, must conform to the pattern `/\d+/`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "file_id" in local_var_params:
path_params["fileId"] = local_var_params["file_id"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if "file_update_input" in local_var_params:
body_params = local_var_params["file_update_input"]
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json", "*/*"]
) # noqa: E501
# HTTP header `Content-Type`
header_params[
"Content-Type"
] = self.api_client.select_header_content_type( # noqa: E501
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["hapikey", "oauth2"] # noqa: E501
return self.api_client.call_api(
"/files/v3/files/{fileId}",
"PATCH",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="File", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def upload(self, **kwargs): # noqa: E501
"""Upload file # noqa: E501
Upload a single file with content specified in request body. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.upload(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str user_agent:
:param str file: File to be uploaded.
:param str folder_id: Either 'folderId' or 'folderPath' is required. folderId is the ID of the folder the file will be uploaded to.
:param str folder_path: Either 'folderPath' or 'folderId' is required. This field represents the destination folder path for the uploaded file. If a path doesn't exist, the system will try to create one.
:param str file_name: Desired name for the uploaded file.
:param str charset_hunch: Character set of the uploaded file.
:param str options: JSON string representing FileUploadOptions.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: File
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.upload_with_http_info(**kwargs) # noqa: E501
def upload_with_http_info(self, **kwargs): # noqa: E501
"""Upload file # noqa: E501
Upload a single file with content specified in request body. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.upload_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str user_agent:
:param str file: File to be uploaded.
:param str folder_id: Either 'folderId' or 'folderPath' is required. folderId is the ID of the folder the file will be uploaded to.
:param str folder_path: Either 'folderPath' or 'folderId' is required. This field represents the destination folder path for the uploaded file. If a path doesn't exist, the system will try to create one.
:param str file_name: Desired name for the uploaded file.
:param str charset_hunch: Character set of the uploaded file.
:param str options: JSON string representing FileUploadOptions.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(File, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
"user_agent",
"file",
"folder_id",
"folder_path",
"file_name",
"charset_hunch",
"options",
]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'" " to method upload" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if "user_agent" in local_var_params:
header_params["User-Agent"] = local_var_params["user_agent"] # noqa: E501
form_params = []
local_var_files = {}
if "file" in local_var_params:
form_params.append(("file", local_var_params["file"])) # noqa: E501
if "folder_id" in local_var_params:
form_params.append(
("folderId", local_var_params["folder_id"])
) # noqa: E501
if "folder_path" in local_var_params:
form_params.append(
("folderPath", local_var_params["folder_path"])
) # noqa: E501
if "file_name" in local_var_params:
form_params.append(
("fileName", local_var_params["file_name"])
) # noqa: E501
if "charset_hunch" in local_var_params:
form_params.append(
("charsetHunch", local_var_params["charset_hunch"])
) # noqa: E501
if "options" in local_var_params:
form_params.append(("options", local_var_params["options"])) # noqa: E501
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json", "*/*"]
) # noqa: E501
# HTTP header `Content-Type`
header_params[
"Content-Type"
] = self.api_client.select_header_content_type( # noqa: E501
["multipart/form-data"]
) # noqa: E501
# Authentication setting
auth_settings = ["hapikey", "oauth2"] # noqa: E501
return self.api_client.call_api(
"/files/v3/files",
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="File", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
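For reference, a minimal usage sketch of the generated upload() call above. The FilesApi class name and the api_client wiring are assumptions about the surrounding generated package; the keyword arguments themselves come from the docstring, and the options payload is illustrative only.
# Hypothetical usage of the generated client above; FilesApi and
# api_client are assumed names from the surrounding package.
api = FilesApi(api_client)
# Synchronous call: returns a File model (multipart POST to /files/v3/files).
uploaded = api.upload(
    file="/tmp/report.pdf",
    folder_path="/reports/2021",      # created if it does not already exist
    file_name="report.pdf",
    options='{"access": "PRIVATE"}',  # illustrative JSON-encoded FileUploadOptions
)
# Asynchronous call: returns the request thread; .get() blocks for the File.
thread = api.upload(async_req=True, file="/tmp/report.pdf", folder_id="1234")
result = thread.get()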
| 43.24857
| 211
| 0.582228
| 7,760
| 68,030
| 4.868943
| 0.041108
| 0.05124
| 0.082259
| 0.024985
| 0.940105
| 0.932351
| 0.924913
| 0.913083
| 0.882593
| 0.873197
| 0
| 0.01667
| 0.341305
| 68,030
| 1,572
| 212
| 43.276081
| 0.826493
| 0.43131
| 0
| 0.681558
| 1
| 0
| 0.182114
| 0.035484
| 0
| 0
| 0
| 0
| 0
| 1
| 0.024055
| false
| 0
| 0.028637
| 0
| 0.076747
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4c4f2f440efe9995d4cc3a56e67d099b0fec2936
| 1,965
|
py
|
Python
|
src/printing/fonts/PSFont_Courier_BoldOblique.py
|
vadmium/grailbrowser
|
ca94e6db2359bcb16c0da256771550d1327c6d33
|
[
"CNRI-Python",
"CNRI-Jython"
] | 9
|
2015-03-23T23:21:42.000Z
|
2021-08-01T01:47:22.000Z
|
src/printing/fonts/PSFont_Courier_BoldOblique.py
|
vadmium/grailbrowser
|
ca94e6db2359bcb16c0da256771550d1327c6d33
|
[
"CNRI-Python",
"CNRI-Jython"
] | null | null | null |
src/printing/fonts/PSFont_Courier_BoldOblique.py
|
vadmium/grailbrowser
|
ca94e6db2359bcb16c0da256771550d1327c6d33
|
[
"CNRI-Python",
"CNRI-Jython"
] | 11
|
2015-03-23T23:22:22.000Z
|
2020-06-08T14:24:17.000Z
|
# Character width information for PostScript font `Courier Bold Oblique'
# generated from the Adobe Font Metric file `../../../../adobe/cobo____.afm'. Adobe
# copyright notice follows:
#
# Copyright (c) 1989, 1990, 1991, Adobe Systems Incorporated. All rights reserved.
#
from . import PSFont
font = PSFont.PSFont('Courier-BoldOblique', 'Courier Bold Oblique',
[ 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 600, 600, 600, 600, 600, 600, 600,
600, 600, 600, 600, 600, 600, 600, 600,
0, 600, 600, 600, 600, 0, 600, 600,
600, 600, 600, 600, 600, 600, 0, 600,
0, 600, 600, 600, 600, 600, 600, 600,
600, 0, 600, 600, 0, 600, 600, 600,
600, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 600, 0, 600, 0, 0, 0, 0,
600, 600, 600, 600, 0, 0, 0, 0,
0, 600, 0, 0, 0, 600, 0, 0,
600, 600, 600, 600, 0, 0, 0, 0,
])
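A small sketch of how a width table like this is typically consumed. string_width is a hypothetical helper, not part of grailbrowser; the 1/1000-em scaling is the standard AFM convention.
# Hypothetical helper: AFM widths are given in 1/1000s of the point size,
# so a string's width is the sum of its per-character widths scaled by
# point_size / 1000.
def string_width(text, point_size, widths):
    return sum(widths[ord(ch) % 256] for ch in text) * point_size / 1000.0
# Courier is monospaced at 600 units, so 10 characters at 12pt measure
# 10 * 600 * 12 / 1000 = 72.0 points.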
| 46.785714
| 84
| 0.41883
| 301
| 1,965
| 2.72093
| 0.112957
| 0.989011
| 1.384615
| 1.728938
| 0.676435
| 0.672772
| 0.666667
| 0.660562
| 0.65812
| 0.654457
| 0
| 0.502664
| 0.426972
| 1,965
| 41
| 85
| 47.926829
| 0.224689
| 0.132316
| 0
| 0.685714
| 1
| 0
| 0.022968
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.028571
| 0
| 0.028571
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d5bfd527041c1663d1deb9a954a7ebdea36a9875
| 6,717
|
py
|
Python
|
starlingx/starlingx/proxy/views/services.py
|
onap/multicloud-openstack
|
d0e41eb1b1a1cb79365836da728908ed26253db4
|
[
"CC-BY-4.0"
] | 4
|
2018-10-24T15:20:14.000Z
|
2020-03-09T06:29:11.000Z
|
starlingx/starlingx/proxy/views/services.py
|
onap/multicloud-openstack
|
d0e41eb1b1a1cb79365836da728908ed26253db4
|
[
"CC-BY-4.0"
] | null | null | null |
starlingx/starlingx/proxy/views/services.py
|
onap/multicloud-openstack
|
d0e41eb1b1a1cb79365836da728908ed26253db4
|
[
"CC-BY-4.0"
] | 2
|
2020-08-03T13:45:44.000Z
|
2021-09-15T21:10:26.000Z
|
# Copyright (c) 2019 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
# from rest_framework import status
# from django.conf import settings
from newton_base.proxy import services as newton_services
from common.msapi import extsys
logger = logging.getLogger(__name__)
# DEBUG=True
class Services(newton_services.Services):
def __init__(self):
super(Services, self).__init__()
# self._logger = logger
class GetTenants(newton_services.GetTenants):
'''
Backward compatible API for /v2.0/tenants
'''
def __init__(self):
super(GetTenants, self).__init__()
# self._logger = logger
def get(self, request, vimid="", servicetype="identity", requri='v3/projects'):
self._logger.info("vimid: %s" % (vimid))
self._logger.debug("with servicetype, requri> %s,%s" % (servicetype, requri))
return super(GetTenants, self).get(request, vimid, servicetype, requri)
class APIv1Services(Services):
def __init__(self):
super(APIv1Services, self).__init__()
# self._logger = logger
def head(self, request, cloud_owner="", cloud_region_id="", servicetype="", requri=""):
self._logger.info("cloud_owner,cloud_region_id: %s,%s" % (cloud_owner, cloud_region_id))
# self._logger.info("servicetype, requri> %s,%s" % (servicetype, requri))
# self._logger.debug("META, data> %s , %s" % (request.META, request.data))
vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
return super(APIv1Services, self).head(request, vimid, servicetype, requri)
def get(self, request, cloud_owner="", cloud_region_id="", servicetype="", requri=""):
self._logger.info("cloud_owner,cloud_region_id: %s,%s" % (cloud_owner, cloud_region_id))
vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
return super(APIv1Services, self).get(request, vimid, servicetype, requri)
def post(self, request, cloud_owner="", cloud_region_id="", servicetype="", requri=""):
self._logger.info("cloud_owner,cloud_region_id: %s,%s" % (cloud_owner, cloud_region_id))
vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
return super(APIv1Services, self).post(request, vimid, servicetype, requri)
def put(self, request, cloud_owner="", cloud_region_id="", servicetype="", requri=""):
self._logger.info("cloud_owner,cloud_region_id: %s,%s" % (cloud_owner, cloud_region_id))
vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
return super(APIv1Services, self).put(request, vimid, servicetype, requri)
def patch(self, request, cloud_owner="", cloud_region_id="", servicetype="", requri=""):
self._logger.info("cloud_owner,cloud_region_id: %s,%s" % (cloud_owner, cloud_region_id))
vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
return super(APIv1Services, self).patch(request, vimid, servicetype, requri)
def delete(self, request, cloud_owner="", cloud_region_id="", servicetype="", requri=""):
self._logger.info("cloud_owner,cloud_region_id: %s,%s" % (cloud_owner, cloud_region_id))
vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
return super(APIv1Services, self).delete(request, vimid, servicetype, requri)
class APIv1GetTenants(GetTenants):
'''
Backward compatible API for /v2.0/tenants
'''
def __init__(self):
super(APIv1GetTenants, self).__init__()
# self._logger = logger
def head(self, request, cloud_owner="", cloud_region_id="", servicetype="identity", requri=""):
self._logger.info("cloud_owner,cloud_region_id: %s,%s" % (cloud_owner, cloud_region_id))
# self._logger.info("servicetype, requri> %s,%s" % (servicetype, requri))
# self._logger.debug("META, data> %s , %s" % (request.META, request.data))
vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
return super(APIv1GetTenants, self).head(request, vimid, servicetype, requri)
def get(self, request, cloud_owner="", cloud_region_id="", servicetype="identity", requri='v3/projects'):
self._logger.info("cloud_owner,cloud_region_id: %s,%s" % (cloud_owner, cloud_region_id))
# self._logger.debug("with servicetype, requri> %s,%s" % (servicetype, requri))
# self._logger.debug("with META> %s" % request.META)
vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
return super(APIv1GetTenants, self).get(request, vimid, servicetype, requri)
def post(self, request, cloud_owner="", cloud_region_id="", servicetype="identity", requri=""):
self._logger.info("cloud_owner,cloud_region_id: %s,%s" % (cloud_owner, cloud_region_id))
# self._logger.debug("with servicetype, requri> %s,%s" % (servicetype, requri))
# self._logger.debug("with META> %s" % request.META)
# self._logger.debug("with data> %s" % request.data)
vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
return super(APIv1GetTenants, self).post(request, vimid, servicetype, requri)
def put(self, request, cloud_owner="", cloud_region_id="", servicetype="identity", requri=""):
self._logger.info("cloud_owner,cloud_region_id: %s,%s" % (cloud_owner, cloud_region_id))
vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
return super(APIv1GetTenants, self).put(request, vimid, servicetype, requri)
def patch(self, request, cloud_owner="", cloud_region_id="", servicetype="identity", requri=""):
self._logger.info("cloud_owner,cloud_region_id: %s,%s" % (cloud_owner, cloud_region_id))
vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
return super(APIv1GetTenants, self).patch(request, vimid, servicetype, requri)
def delete(self, request, cloud_owner="", cloud_region_id="", servicetype="identity", requri=""):
self._logger.info("cloud_owner,cloud_region_id: %s,%s" % (cloud_owner, cloud_region_id))
vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
return super(APIv1GetTenants, self).delete(request, vimid, servicetype, requri)
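The APIv1 classes above differ from their parents only in collapsing (cloud_owner, cloud_region_id) into a single vimid before delegating. A hypothetical stand-in for extsys.encode_vim_id makes that shape concrete; the real implementation is imported from common.msapi above, and the separator shown is an assumption.
# Hypothetical stand-in, for illustration only; not the real extsys helper.
def encode_vim_id(cloud_owner, cloud_region_id):
    # Joins the two identifiers into one vimid string; the underscore
    # separator is an assumption, not taken from the source.
    return "%s_%s" % (cloud_owner, cloud_region_id)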
| 45.693878
| 109
| 0.693167
| 860
| 6,717
| 5.139535
| 0.14186
| 0.108597
| 0.162896
| 0.228054
| 0.811765
| 0.793213
| 0.763122
| 0.763122
| 0.749774
| 0.749774
| 0
| 0.005395
| 0.172101
| 6,717
| 146
| 110
| 46.006849
| 0.789426
| 0.215572
| 0
| 0.411765
| 0
| 0
| 0.10096
| 0.064491
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.044118
| 0
| 0.544118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 9
|
d5d59546f3c5ce6e7a9abfe902e16ef98d0b7a56
| 193
|
py
|
Python
|
example_snippets/multimenus_snippets/Snippets/NumPy/Pretty printing/Formatting functions for specific dtypes/Set formatter for all `complex` types.py
|
kuanpern/jupyterlab-snippets-multimenus
|
477f51cfdbad7409eab45abe53cf774cd70f380c
|
[
"BSD-3-Clause"
] | null | null | null |
example_snippets/multimenus_snippets/Snippets/NumPy/Pretty printing/Formatting functions for specific dtypes/Set formatter for all `complex` types.py
|
kuanpern/jupyterlab-snippets-multimenus
|
477f51cfdbad7409eab45abe53cf774cd70f380c
|
[
"BSD-3-Clause"
] | null | null | null |
example_snippets/multimenus_snippets/Snippets/NumPy/Pretty printing/Formatting functions for specific dtypes/Set formatter for all `complex` types.py
|
kuanpern/jupyterlab-snippets-multimenus
|
477f51cfdbad7409eab45abe53cf774cd70f380c
|
[
"BSD-3-Clause"
] | 1
|
2021-02-04T04:51:48.000Z
|
2021-02-04T04:51:48.000Z
|
import numpy as np
def format_complex_kind(x):
    return '{0.real}+1j*{0.imag}'.format(x)
# np.printoptions (NumPy >= 1.15) temporarily overrides array formatting;
# the 'complex_kind' key applies to all complex dtypes (complex64/complex128).
with np.printoptions(formatter={'complex_kind': format_complex_kind}):
    print(np.random.random(5) + 1j*np.random.random(5))
| 48.25
| 67
| 0.73057
| 31
| 193
| 4.387097
| 0.548387
| 0.242647
| 0.25
| 0.220588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033708
| 0.07772
| 193
| 4
| 68
| 48.25
| 0.730337
| 0
| 0
| 0
| 0
| 0
| 0.164948
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0.25
| 0.5
| 0.5
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
|
0
| 7
|
d5f3da415bd99552c4ce2b14f7acbbbb7db58232
| 68,632
|
py
|
Python
|
benchmarks/SimResults/combinations_spec_ml_fulltrained/oldstuff/cmp_sjengpovrayomnetppmilc/power.py
|
TugberkArkose/MLScheduler
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
[
"Unlicense"
] | null | null | null |
benchmarks/SimResults/combinations_spec_ml_fulltrained/oldstuff/cmp_sjengpovrayomnetppmilc/power.py
|
TugberkArkose/MLScheduler
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
[
"Unlicense"
] | null | null | null |
benchmarks/SimResults/combinations_spec_ml_fulltrained/oldstuff/cmp_sjengpovrayomnetppmilc/power.py
|
TugberkArkose/MLScheduler
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
[
"Unlicense"
] | null | null | null |
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0463191,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.239069,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.23226,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.258417,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.447485,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.256645,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.962547,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.219825,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.72588,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0438789,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00936782,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0858089,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0692808,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.129688,
'Execution Unit/Register Files/Runtime Dynamic': 0.0786486,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.220039,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.511212,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 2.19685,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00206187,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00206187,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00180582,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.0007045,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000995224,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00692478,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0194138,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0666014,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 4.23642,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.215799,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.226208,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 6.66368,
'Instruction Fetch Unit/Runtime Dynamic': 0.534947,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0632397,
'L2/Runtime Dynamic': 0.0190059,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.28238,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.00916,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.066169,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.066169,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.59612,
'Load Store Unit/Runtime Dynamic': 1.40165,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.163161,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.326323,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0579065,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0587829,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.263405,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0355943,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.522013,
'Memory Management Unit/Runtime Dynamic': 0.0943772,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 21.1326,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.153084,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0150561,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.130998,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.299138,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 4.54598,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.051496,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.243136,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.264105,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.172171,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.277705,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.140176,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.590052,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.156423,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.6023,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0498952,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00722162,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0720697,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0534083,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.121965,
'Execution Unit/Register Files/Runtime Dynamic': 0.0606299,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.164707,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.383923,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.68312,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00136148,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00136148,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00121748,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000488605,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000767215,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00470765,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0119237,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0513428,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.26584,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.162839,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.174383,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.64286,
'Instruction Fetch Unit/Runtime Dynamic': 0.405196,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0482924,
'L2/Runtime Dynamic': 0.0108041,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.97646,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.845576,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0562718,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0562719,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.24219,
'Load Store Unit/Runtime Dynamic': 1.17936,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.138757,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.277514,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0492452,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0498317,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.203058,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0271063,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.443761,
'Memory Management Unit/Runtime Dynamic': 0.076938,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 17.5689,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.131252,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00936518,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0849226,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.22554,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 3.58096,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0372227,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.231925,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.186831,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.161416,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.260359,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.13142,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.553195,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.15597,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.46536,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0352963,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00677053,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0634716,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0500722,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0987679,
'Execution Unit/Register Files/Runtime Dynamic': 0.0568427,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.143025,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.332289,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.57963,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.0015037,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.0015037,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00135614,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000550377,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000719291,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00508283,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0127586,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0481357,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.06184,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.150088,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.16349,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.42896,
'Instruction Fetch Unit/Runtime Dynamic': 0.379556,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0290294,
'L2/Runtime Dynamic': 0.00727207,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.62107,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.67339,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0447739,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.044774,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.8325,
'Load Store Unit/Runtime Dynamic': 0.938974,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.110405,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.22081,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0391831,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0395497,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.190374,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0248103,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.413793,
'Memory Management Unit/Runtime Dynamic': 0.06436,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 16.7591,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0928486,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00841261,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0803222,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.181583,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 3.15137,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0141222,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.213781,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.071638,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0965006,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.155652,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0785678,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.33072,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0993847,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.14611,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.013534,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00404767,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0347447,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.029935,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0482787,
'Execution Unit/Register Files/Runtime Dynamic': 0.0339827,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0767287,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.184313,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.16817,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00108012,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00108012,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000974869,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000396031,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000430019,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00356513,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00913821,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0287773,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.83048,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0954739,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0977406,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.13783,
'Instruction Fetch Unit/Runtime Dynamic': 0.234695,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0269478,
'L2/Runtime Dynamic': 0.00811837,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.09219,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.422948,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0276636,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0276635,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.22283,
'Load Store Unit/Runtime Dynamic': 0.587038,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0682137,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.136427,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0242093,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0245942,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.113812,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0157097,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.311509,
'Memory Management Unit/Runtime Dynamic': 0.0403039,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 14.4347,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0356013,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0047871,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0483609,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.0887493,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.12708,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 3.7994886743776113,
'Runtime Dynamic': 3.7994886743776113,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.196061,
'Runtime Dynamic': 0.0827429,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 191.908,
'Gate Leakage': 1.53485,
'Peak Dynamic': 70.0914,
'Peak Power': 103.204,
'Runtime Dynamic': 13.4881,
'Subthreshold Leakage': 31.5774,
'Subthreshold Leakage with power gating': 13.9484,
'Total Cores/Area': 128.669,
'Total Cores/Gate Leakage': 1.4798,
'Total Cores/Peak Dynamic': 69.8953,
'Total Cores/Runtime Dynamic': 13.4054,
'Total Cores/Subthreshold Leakage': 24.7074,
'Total Cores/Subthreshold Leakage with power gating': 10.2429,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.196061,
'Total L3s/Runtime Dynamic': 0.0827429,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 33.1122,
'Total NoCs/Area': 1.33155,
'Total NoCs/Gate Leakage': 0.00662954,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0691322,
'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}}
| 75.089716
| 124
| 0.682189
| 8,082
| 68,632
| 5.787181
| 0.067929
| 0.123493
| 0.112888
| 0.093389
| 0.938403
| 0.931113
| 0.918135
| 0.886642
| 0.862418
| 0.841914
| 0
| 0.132323
| 0.224254
| 68,632
| 914
| 125
| 75.089716
| 0.746173
| 0
| 0
| 0.642232
| 0
| 0
| 0.657191
| 0.048082
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9103def0ad3ff8d5978a57569566805fac0bbf55
| 9,083
|
py
|
Python
|
test/test_shannon.py
|
dglmoore/pyinform
|
e2f3d4b3e0353b9cfa0f9111227336a2634980d9
|
[
"MIT"
] | null | null | null |
test/test_shannon.py
|
dglmoore/pyinform
|
e2f3d4b3e0353b9cfa0f9111227336a2634980d9
|
[
"MIT"
] | null | null | null |
test/test_shannon.py
|
dglmoore/pyinform
|
e2f3d4b3e0353b9cfa0f9111227336a2634980d9
|
[
"MIT"
] | null | null | null |
# Copyright 2016 ELIFE. All rights reserved.
# Use of this source code is governed by a MIT
# license that can be found in the LICENSE file.
import unittest
from math import isnan, log
import numpy as np  # needed below for np.random.randint and np.arange
from pyinform.shannon import *
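# Convention exercised throughout this suite: the Shannon measures return NaN
# for a negative base b, 0.0 for b == 0, and otherwise compute the base-b
# entropy H_b(X) = -sum_i p_i * log_b(p_i). Bases in (0, 1) therefore flip the
# sign (see the uniform case below: H_2 = 2.321928 vs. H_0.5 = -2.321928).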
class TestShannon(unittest.TestCase):
def test_entropy_invalid_dist(self):
d = Dist(5)
self.assertFalse(d.valid())
self.assertTrue(isnan(entropy(d)))
def test_entropy_delta_function(self):
d = Dist([0,1,0,0,0])
self.assertTrue(isnan(entropy(d, b=-1.0)))
self.assertTrue(isnan(entropy(d, b=-0.5)))
self.assertAlmostEqual(0.000000, entropy(d, b=0.0), places=6)
self.assertAlmostEqual(0.000000, entropy(d, b=0.5), places=6)
self.assertAlmostEqual(0.000000, entropy(d, b=1.5), places=6)
self.assertAlmostEqual(0.000000, entropy(d, b=2), places=6)
self.assertAlmostEqual(0.000000, entropy(d, b=3), places=6)
self.assertAlmostEqual(0.000000, entropy(d, b=4), places=6)
def test_entropy_uniform(self):
d = Dist([1,1,1,1,1])
self.assertTrue(isnan(entropy(d, b=-1.0)))
self.assertTrue(isnan(entropy(d, b=-0.5)))
self.assertAlmostEqual( 0.000000, entropy(d, b=0.0), places=6)
self.assertAlmostEqual(-2.321928, entropy(d, b=0.5), places=6)
self.assertAlmostEqual( 3.969362, entropy(d, b=1.5), places=6)
self.assertAlmostEqual( 2.321928, entropy(d, b=2), places=6)
self.assertAlmostEqual( 1.464974, entropy(d, b=3), places=6)
self.assertAlmostEqual( 1.160964, entropy(d, b=4), places=6)
def test_entropy_nonuniform(self):
d = Dist([2,1])
self.assertTrue(isnan(entropy(d, b=-1.0)))
self.assertTrue(isnan(entropy(d, b=-0.5)))
self.assertAlmostEqual( 0.000000, entropy(d, b=0.0), places=6)
self.assertAlmostEqual(-0.918296, entropy(d, b=0.5), places=6)
self.assertAlmostEqual( 1.569837, entropy(d, b=1.5), places=6)
self.assertAlmostEqual( 0.918296, entropy(d, b=2), places=6)
self.assertAlmostEqual( 0.579380, entropy(d, b=3), places=6)
self.assertAlmostEqual( 0.459148, entropy(d, b=4), places=6)
d = Dist([1,1,0])
self.assertTrue(isnan(entropy(d, b=-1.0)))
self.assertTrue(isnan(entropy(d, b=-0.5)))
self.assertAlmostEqual( 0.000000, entropy(d, b=0.0), places=6)
self.assertAlmostEqual(-1.000000, entropy(d, b=0.5), places=6)
self.assertAlmostEqual( 1.709511, entropy(d, b=1.5), places=6)
self.assertAlmostEqual( 1.000000, entropy(d, b=2), places=6)
self.assertAlmostEqual( 0.630930, entropy(d, b=3), places=6)
self.assertAlmostEqual( 0.500000, entropy(d, b=4), places=6)
d = Dist([2,2,1])
self.assertTrue(isnan(entropy(d, b=-1.0)))
self.assertTrue(isnan(entropy(d, b=-0.5)))
self.assertAlmostEqual( 0.000000, entropy(d, b=0.0), places=6)
self.assertAlmostEqual(-1.521928, entropy(d, b=0.5), places=6)
self.assertAlmostEqual( 2.601753, entropy(d, b=1.5), places=6)
self.assertAlmostEqual( 1.521928, entropy(d, b=2), places=6)
self.assertAlmostEqual( 0.960230, entropy(d, b=3), places=6)
self.assertAlmostEqual( 0.760964, entropy(d, b=4), places=6)
def test_conditional_entropy_invalid_dist(self):
invalid = Dist(5)
a = Dist([1,2,3,4])
self.assertTrue(isnan(conditional_entropy(invalid, a)))
self.assertTrue(isnan(conditional_entropy(a, invalid)))
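    # For independent X and Y the joint distribution factorizes as
    # p(x, y) = p(x) * p(y), which the nested loop below builds from the
    # (unnormalized) marginal counts. In that case H(X,Y | X) = H(Y) and
    # H(X,Y | Y) = H(X), which is what the expected values encode.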
def test_conditional_entropy_independent(self):
x = Dist([5,2,3,5,1,4,6,2,1,4,2,4])
y = Dist([2,4,5,2,7,3,9,8,8,7,2,3])
joint = Dist(len(x)*len(y))
for i in range(len(x)):
for j in range(len(y)):
joint[i*len(y) + j] = x[i] * y[j]
self.assertTrue(isnan(conditional_entropy(joint, x, b=-1.0)))
self.assertTrue(isnan(conditional_entropy(joint, x, b=-0.5)))
self.assertAlmostEqual( 0.00000, conditional_entropy(joint, x, b=0.0), places=6)
self.assertAlmostEqual(-3.391029, conditional_entropy(joint, x, b=0.5), places=6)
self.assertAlmostEqual( 5.797002, conditional_entropy(joint, x, b=1.5), places=6)
self.assertAlmostEqual( 3.391029, conditional_entropy(joint, x, b=2), places=6)
self.assertAlmostEqual( 2.139501, conditional_entropy(joint, x, b=3), places=6)
self.assertAlmostEqual( 1.695514, conditional_entropy(joint, x, b=4), places=6)
self.assertTrue(isnan(conditional_entropy(joint, y, b=-1.0)))
self.assertTrue(isnan(conditional_entropy(joint, y, b=-0.5)))
self.assertAlmostEqual( 0.00000, conditional_entropy(joint, y, b=0.0), places=6)
self.assertAlmostEqual(-3.401199, conditional_entropy(joint, y, b=0.5), places=6)
self.assertAlmostEqual( 5.814387, conditional_entropy(joint, y, b=1.5), places=6)
self.assertAlmostEqual( 3.401199, conditional_entropy(joint, y, b=2), places=6)
self.assertAlmostEqual( 2.145917, conditional_entropy(joint, y, b=3), places=6)
self.assertAlmostEqual( 1.700599, conditional_entropy(joint, y, b=4), places=6)
def test_conditional_entropy_dependent(self):
joint = Dist([10,70,15,5])
x = Dist([80,20])
y = Dist([25,75])
self.assertTrue(isnan(conditional_entropy(joint, x, b=-1.0)))
self.assertTrue(isnan(conditional_entropy(joint, x, b=-0.5)))
self.assertAlmostEqual( 0.000000, conditional_entropy(joint, x, b=0.0), places=6)
self.assertAlmostEqual(-0.597107, conditional_entropy(joint, x, b=0.5), places=6)
self.assertAlmostEqual( 1.020761, conditional_entropy(joint, x, b=1.5), places=6)
self.assertAlmostEqual( 0.597107, conditional_entropy(joint, x, b=2), places=6)
self.assertAlmostEqual( 0.376733, conditional_entropy(joint, x, b=3), places=6)
self.assertAlmostEqual( 0.298554, conditional_entropy(joint, x, b=4), places=6)
self.assertTrue(isnan(conditional_entropy(joint, y, b=-1.0)))
self.assertTrue(isnan(conditional_entropy(joint, y, b=-0.5)))
self.assertAlmostEqual( 0.000000, conditional_entropy(joint, y, b=0.0), places=6)
self.assertAlmostEqual(-0.507757, conditional_entropy(joint, y, b=0.5), places=6)
self.assertAlmostEqual( 0.868017, conditional_entropy(joint, y, b=1.5), places=6)
self.assertAlmostEqual( 0.507757, conditional_entropy(joint, y, b=2), places=6)
self.assertAlmostEqual( 0.320359, conditional_entropy(joint, y, b=3), places=6)
self.assertAlmostEqual( 0.253879, conditional_entropy(joint, y, b=4), places=6)
def test_relative_entropy_invalid(self):
p = Dist(5)
q = Dist(5)
self.assertTrue(isnan(relative_entropy(p,q)))
p.tick(0)
self.assertTrue(isnan(relative_entropy(p,q)))
self.assertTrue(isnan(relative_entropy(q,p)))
def test_relative_entropy_sizes(self):
p = Dist(5)
p.tick(0)
q = Dist(4)
p.tick(1)
self.assertTrue(isnan(relative_entropy(p,q)))
self.assertTrue(isnan(relative_entropy(q,p)))
def test_relative_entropy_undefined(self):
p = Dist([1,1,1,1,1])
q = Dist([1,1,1,2,0])
self.assertTrue(isnan(relative_entropy(p,q)))
self.assertFalse(isnan(relative_entropy(q,p)))
def test_relative_entropy_same_dist(self):
p = Dist(np.random.randint(0, 100, 20))
self.assertTrue(isnan(relative_entropy(p, p, -1.0)))
self.assertTrue(isnan(relative_entropy(p, p, -0.5)))
        self.assertAlmostEqual(0.000000, relative_entropy(p, p, 0.0), places=6)
        self.assertAlmostEqual(0.000000, relative_entropy(p, p, 0.5), places=6)
        self.assertAlmostEqual(0.000000, relative_entropy(p, p, 1.5), places=6)
        self.assertAlmostEqual(0.000000, relative_entropy(p, p, 2.0), places=6)
        self.assertAlmostEqual(0.000000, relative_entropy(p, p, 3.0), places=6)
        self.assertAlmostEqual(0.000000, relative_entropy(p, p, 4.0), places=6)
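    # Relative entropy (KL divergence) is D_b(p || q) = sum_i p_i * log_b(p_i / q_i).
    # It is asymmetric, and undefined whenever q assigns zero mass to an outcome
    # that p supports (see test_relative_entropy_undefined above); the closed-form
    # expectations below follow directly from this formula.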
def test_relative_entropy(self):
p = Dist([1,0,0])
q = Dist([1,1,1])
for b in np.arange(2.0, 4.0, 0.5):
self.assertAlmostEqual(log(3., b), relative_entropy(p, q, b))
p = Dist([1,1,0])
for b in np.arange(2.0, 4.0, 0.5):
self.assertAlmostEqual(log(3./2., b), relative_entropy(p, q, b))
p = Dist([2,2,1])
for b in np.arange(2.0, 4.0, 0.5):
self.assertAlmostEqual((4.*log(6./5., b) + log(3./5., b))/5., relative_entropy(p, q, b))
q = Dist([1,2,2])
for b in np.arange(2.0, 4.0, 0.5):
self.assertAlmostEqual(log(2., b)/5., relative_entropy(p, q, b))
p = Dist([1,0,0])
q = Dist([4,1,0])
for b in np.arange(2.0, 4.0, 0.5):
self.assertAlmostEqual(log(5./4., b), relative_entropy(p, q, b))
q = Dist([1,4,0])
for b in np.arange(2.0, 4.0, 0.5):
self.assertAlmostEqual(log(5., b), relative_entropy(p, q, b))
if __name__ == "__main__":
unittest.main()
| 47.805263
| 100
| 0.633821
| 1,386
| 9,083
| 4.082973
| 0.094517
| 0.24492
| 0.194381
| 0.222654
| 0.839017
| 0.812158
| 0.804913
| 0.77982
| 0.727337
| 0.620428
| 0
| 0.107903
| 0.203127
| 9,083
| 189
| 101
| 48.058201
| 0.673943
| 0.014753
| 0
| 0.259494
| 0
| 0
| 0.000894
| 0
| 0
| 0
| 0
| 0
| 0.613924
| 1
| 0.075949
| false
| 0
| 0.031646
| 0
| 0.113924
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
9105bf68b24c821965e39f52c2278d9ae3a8ceb3
| 101
|
py
|
Python
|
api/app/users/__init__.py
|
yunfei07/vue-flask-in-action
|
8695f9a252bb3e2136609f421e02a0d3f01c0e58
|
[
"MIT"
] | 1
|
2021-11-13T15:54:43.000Z
|
2021-11-13T15:54:43.000Z
|
api/app/users/__init__.py
|
yunfei07/vue-flask-in-action
|
8695f9a252bb3e2136609f421e02a0d3f01c0e58
|
[
"MIT"
] | null | null | null |
api/app/users/__init__.py
|
yunfei07/vue-flask-in-action
|
8695f9a252bb3e2136609f421e02a0d3f01c0e58
|
[
"MIT"
] | 1
|
2021-11-13T00:57:20.000Z
|
2021-11-13T00:57:20.000Z
|
from flask import Blueprint
users_bp = Blueprint('users_bp', __name__)
from app.users import routes
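# Importing routes at the bottom, after users_bp is defined, is the standard
# Flask blueprint idiom for avoiding a circular import: the routes module is
# expected to import users_bp in order to register its view functions on it.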
| 16.833333
| 41
| 0.80198
| 15
| 101
| 5
| 0.6
| 0.373333
| 0.426667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128713
| 101
| 5
| 42
| 20.2
| 0.852273
| 0
| 0
| 0
| 0
| 0
| 0.079208
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 7
|
911fa5cb9899fef138d8e9e1ef6784e3e48261b6
| 18,586
|
py
|
Python
|
tests/unit/test_git_utils.py
|
eitansela/sagemaker-python-sdk
|
aa54102b5113b1d39bbbd4d9d341775f84641681
|
[
"Apache-2.0"
] | 1
|
2021-07-22T00:23:51.000Z
|
2021-07-22T00:23:51.000Z
|
tests/unit/test_git_utils.py
|
eitansela/sagemaker-python-sdk
|
aa54102b5113b1d39bbbd4d9d341775f84641681
|
[
"Apache-2.0"
] | 24
|
2021-05-18T07:10:27.000Z
|
2021-05-28T13:36:51.000Z
|
tests/unit/test_git_utils.py
|
eitansela/sagemaker-python-sdk
|
aa54102b5113b1d39bbbd4d9d341775f84641681
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import pytest
import os
import subprocess
from mock import patch, ANY
from sagemaker import git_utils
REPO_DIR = "/tmp/repo_dir"
PUBLIC_GIT_REPO = "https://github.com/aws/sagemaker-python-sdk.git"
PUBLIC_BRANCH = "test-branch-git-config"
PUBLIC_COMMIT = "ae15c9d7d5b97ea95ea451e4662ee43da3401d73"
PRIVATE_GIT_REPO_SSH = "git@github.com:testAccount/private-repo.git"
PRIVATE_GIT_REPO = "https://github.com/testAccount/private-repo.git"
PRIVATE_BRANCH = "test-branch"
PRIVATE_COMMIT = "329bfcf884482002c05ff7f44f62599ebc9f445a"
CODECOMMIT_REPO = "https://git-codecommit.us-west-2.amazonaws.com/v1/repos/test-repo/"
CODECOMMIT_REPO_SSH = "ssh://git-codecommit.us-west-2.amazonaws.com/v1/repos/test-repo/"
CODECOMMIT_BRANCH = "master"
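# All tests below stub out the side-effectful pieces of git_clone_repo:
# subprocess.check_call (so no real `git` runs), tempfile.mkdtemp (pinned to
# REPO_DIR), and the os.path checks that validate the cloned tree. Each test
# then asserts on the exact git commands the helper would have issued.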
@patch("subprocess.check_call")
@patch("tempfile.mkdtemp", return_value=REPO_DIR)
@patch("os.path.isfile", return_value=True)
@patch("os.path.isdir", return_value=True)
@patch("os.path.exists", return_value=True)
def test_git_clone_repo_succeed(exists, isdir, isfile, mkdtemp, check_call):
git_config = {"repo": PUBLIC_GIT_REPO, "branch": PUBLIC_BRANCH, "commit": PUBLIC_COMMIT}
entry_point = "entry_point"
source_dir = "source_dir"
dependencies = ["foo", "bar"]
env = os.environ.copy()
env["GIT_TERMINAL_PROMPT"] = "0"
ret = git_utils.git_clone_repo(git_config, entry_point, source_dir, dependencies)
check_call.assert_any_call(["git", "clone", git_config["repo"], REPO_DIR], env=env)
check_call.assert_any_call(args=["git", "checkout", PUBLIC_BRANCH], cwd=REPO_DIR)
check_call.assert_any_call(args=["git", "checkout", PUBLIC_COMMIT], cwd=REPO_DIR)
mkdtemp.assert_called_once()
assert ret["entry_point"] == "entry_point"
assert ret["source_dir"] == "/tmp/repo_dir/source_dir"
assert ret["dependencies"] == ["/tmp/repo_dir/foo", "/tmp/repo_dir/bar"]
def test_git_clone_repo_repo_not_provided():
git_config = {"branch": PUBLIC_BRANCH, "commit": PUBLIC_COMMIT}
entry_point = "entry_point_that_does_not_exist"
source_dir = "source_dir"
dependencies = ["foo", "bar"]
with pytest.raises(ValueError) as error:
git_utils.git_clone_repo(git_config, entry_point, source_dir, dependencies)
assert "Please provide a repo for git_config." in str(error)
def test_git_clone_repo_git_argument_wrong_format():
git_config = {
"repo": PUBLIC_GIT_REPO,
"branch": PUBLIC_BRANCH,
"commit": PUBLIC_COMMIT,
"token": 42,
}
entry_point = "entry_point"
source_dir = "source_dir"
dependencies = ["foo", "bar"]
env = os.environ.copy()
env["GIT_TERMINAL_PROMPT"] = "0"
with pytest.raises(ValueError) as error:
git_utils.git_clone_repo(git_config, entry_point, source_dir, dependencies)
assert "'token' must be a string." in str(error)
@patch(
"subprocess.check_call",
side_effect=subprocess.CalledProcessError(
returncode=1, cmd="git clone {} {}".format(PUBLIC_GIT_REPO, REPO_DIR)
),
)
@patch("tempfile.mkdtemp", return_value=REPO_DIR)
def test_git_clone_repo_clone_fail(mkdtemp, check_call):
git_config = {"repo": PUBLIC_GIT_REPO, "branch": PUBLIC_BRANCH, "commit": PUBLIC_COMMIT}
entry_point = "entry_point"
source_dir = "source_dir"
dependencies = ["foo", "bar"]
with pytest.raises(subprocess.CalledProcessError) as error:
git_utils.git_clone_repo(git_config, entry_point, source_dir, dependencies)
assert "returned non-zero exit status" in str(error.value)
@patch(
"subprocess.check_call",
side_effect=[True, subprocess.CalledProcessError(returncode=1, cmd="git checkout banana")],
)
@patch("tempfile.mkdtemp", return_value=REPO_DIR)
def test_git_clone_repo_branch_not_exist(mkdtemp, check_call):
git_config = {"repo": PUBLIC_GIT_REPO, "branch": PUBLIC_BRANCH, "commit": PUBLIC_COMMIT}
entry_point = "entry_point"
source_dir = "source_dir"
dependencies = ["foo", "bar"]
with pytest.raises(subprocess.CalledProcessError) as error:
git_utils.git_clone_repo(git_config, entry_point, source_dir, dependencies)
assert "returned non-zero exit status" in str(error.value)
@patch(
"subprocess.check_call",
side_effect=[
True,
True,
subprocess.CalledProcessError(returncode=1, cmd="git checkout {}".format(PUBLIC_COMMIT)),
],
)
@patch("tempfile.mkdtemp", return_value=REPO_DIR)
def test_git_clone_repo_commit_not_exist(mkdtemp, check_call):
git_config = {"repo": PUBLIC_GIT_REPO, "branch": PUBLIC_BRANCH, "commit": PUBLIC_COMMIT}
entry_point = "entry_point"
source_dir = "source_dir"
dependencies = ["foo", "bar"]
with pytest.raises(subprocess.CalledProcessError) as error:
git_utils.git_clone_repo(git_config, entry_point, source_dir, dependencies)
assert "returned non-zero exit status" in str(error.value)
@patch("subprocess.check_call")
@patch("tempfile.mkdtemp", return_value=REPO_DIR)
@patch("os.path.isfile", return_value=False)
@patch("os.path.isdir", return_value=True)
@patch("os.path.exists", return_value=True)
def test_git_clone_repo_entry_point_not_exist(exists, isdir, isfile, mkdtemp, check_call):
git_config = {"repo": PUBLIC_GIT_REPO, "branch": PUBLIC_BRANCH, "commit": PUBLIC_COMMIT}
entry_point = "entry_point_that_does_not_exist"
source_dir = "source_dir"
dependencies = ["foo", "bar"]
with pytest.raises(ValueError) as error:
git_utils.git_clone_repo(git_config, entry_point, source_dir, dependencies)
assert "Entry point does not exist in the repo." in str(error)
@patch("subprocess.check_call")
@patch("tempfile.mkdtemp", return_value=REPO_DIR)
@patch("os.path.isfile", return_value=True)
@patch("os.path.isdir", return_value=False)
@patch("os.path.exists", return_value=True)
def test_git_clone_repo_source_dir_not_exist(exists, isdir, isfile, mkdtemp, check_call):
git_config = {"repo": PUBLIC_GIT_REPO, "branch": PUBLIC_BRANCH, "commit": PUBLIC_COMMIT}
entry_point = "entry_point"
source_dir = "source_dir_that_does_not_exist"
dependencies = ["foo", "bar"]
with pytest.raises(ValueError) as error:
git_utils.git_clone_repo(git_config, entry_point, source_dir, dependencies)
assert "Source directory does not exist in the repo." in str(error)
@patch("subprocess.check_call")
@patch("tempfile.mkdtemp", return_value=REPO_DIR)
@patch("os.path.isfile", return_value=True)
@patch("os.path.isdir", return_value=True)
@patch("os.path.exists", side_effect=[True, False])
def test_git_clone_repo_dependencies_not_exist(exists, isdir, isfile, mkdtemp, check_call):
git_config = {"repo": PUBLIC_GIT_REPO, "branch": PUBLIC_BRANCH, "commit": PUBLIC_COMMIT}
entry_point = "entry_point"
source_dir = "source_dir"
dependencies = ["foo", "dep_that_does_not_exist"]
with pytest.raises(ValueError) as error:
git_utils.git_clone_repo(git_config, entry_point, source_dir, dependencies)
assert "does not exist in the repo." in str(error)
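# For HTTPS repos with username/password and no 2FA, git_clone_repo embeds the
# credentials in the clone URL; note that the password is URL-encoded in the
# expected command below ("passw0rd!" becomes "passw0rd%21").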
@patch("subprocess.check_call")
@patch("tempfile.mkdtemp", return_value=REPO_DIR)
@patch("os.path.isfile", return_value=True)
def test_git_clone_repo_with_username_password_no_2fa(isfile, mkdtemp, check_call):
git_config = {
"repo": PRIVATE_GIT_REPO,
"branch": PRIVATE_BRANCH,
"commit": PRIVATE_COMMIT,
"username": "username",
"password": "passw0rd!",
}
entry_point = "entry_point"
env = os.environ.copy()
env["GIT_TERMINAL_PROMPT"] = "0"
ret = git_utils.git_clone_repo(git_config=git_config, entry_point=entry_point)
check_call.assert_any_call(
[
"git",
"clone",
"https://username:passw0rd%21@github.com/testAccount/private-repo.git",
REPO_DIR,
],
env=env,
)
check_call.assert_any_call(args=["git", "checkout", PRIVATE_BRANCH], cwd=REPO_DIR)
check_call.assert_any_call(args=["git", "checkout", PRIVATE_COMMIT], cwd=REPO_DIR)
assert ret["entry_point"] == "/tmp/repo_dir/entry_point"
assert ret["source_dir"] is None
assert ret["dependencies"] is None
@patch("subprocess.check_call")
@patch("tempfile.mkdtemp", return_value=REPO_DIR)
@patch("os.path.isfile", return_value=True)
def test_git_clone_repo_with_token_no_2fa(isfile, mkdtemp, check_call):
git_config = {
"repo": PRIVATE_GIT_REPO,
"branch": PRIVATE_BRANCH,
"commit": PRIVATE_COMMIT,
"token": "my-token",
"2FA_enabled": False,
}
entry_point = "entry_point"
env = os.environ.copy()
env["GIT_TERMINAL_PROMPT"] = "0"
ret = git_utils.git_clone_repo(git_config=git_config, entry_point=entry_point)
check_call.assert_any_call(
["git", "clone", "https://my-token@github.com/testAccount/private-repo.git", REPO_DIR],
env=env,
)
check_call.assert_any_call(args=["git", "checkout", PRIVATE_BRANCH], cwd=REPO_DIR)
check_call.assert_any_call(args=["git", "checkout", PRIVATE_COMMIT], cwd=REPO_DIR)
assert ret["entry_point"] == "/tmp/repo_dir/entry_point"
assert ret["source_dir"] is None
assert ret["dependencies"] is None
@patch("subprocess.check_call")
@patch("tempfile.mkdtemp", return_value=REPO_DIR)
@patch("os.path.isfile", return_value=True)
def test_git_clone_repo_with_token_2fa(isfile, mkdtemp, check_call):
git_config = {
"repo": PRIVATE_GIT_REPO,
"branch": PRIVATE_BRANCH,
"commit": PRIVATE_COMMIT,
"2FA_enabled": True,
"username": "username",
"token": "my-token",
}
entry_point = "entry_point"
env = os.environ.copy()
env["GIT_TERMINAL_PROMPT"] = "0"
ret = git_utils.git_clone_repo(git_config=git_config, entry_point=entry_point)
check_call.assert_any_call(
["git", "clone", "https://my-token@github.com/testAccount/private-repo.git", REPO_DIR],
env=env,
)
check_call.assert_any_call(args=["git", "checkout", PRIVATE_BRANCH], cwd=REPO_DIR)
check_call.assert_any_call(args=["git", "checkout", PRIVATE_COMMIT], cwd=REPO_DIR)
assert ret["entry_point"] == "/tmp/repo_dir/entry_point"
assert ret["source_dir"] is None
assert ret["dependencies"] is None
@patch("subprocess.check_call")
@patch("os.chmod")
@patch("tempfile.mkdtemp", return_value=REPO_DIR)
@patch("os.path.isfile", return_value=True)
def test_git_clone_repo_ssh(isfile, mkdtemp, chmod, check_call):
git_config = {"repo": PRIVATE_GIT_REPO_SSH, "branch": PRIVATE_BRANCH, "commit": PRIVATE_COMMIT}
entry_point = "entry_point"
ret = git_utils.git_clone_repo(git_config, entry_point)
chmod.assert_any_call(ANY, 0o511)
assert ret["entry_point"] == "/tmp/repo_dir/entry_point"
assert ret["source_dir"] is None
assert ret["dependencies"] is None
@patch("subprocess.check_call")
@patch("tempfile.mkdtemp", return_value=REPO_DIR)
@patch("os.path.isfile", return_value=True)
def test_git_clone_repo_with_token_no_2fa_unnecessary_creds_provided(isfile, mkdtemp, check_call):
git_config = {
"repo": PRIVATE_GIT_REPO,
"branch": PRIVATE_BRANCH,
"commit": PRIVATE_COMMIT,
"username": "username",
"password": "passw0rd!",
"token": "my-token",
}
entry_point = "entry_point"
env = os.environ.copy()
env["GIT_TERMINAL_PROMPT"] = "0"
with pytest.warns(UserWarning) as warn:
ret = git_utils.git_clone_repo(git_config=git_config, entry_point=entry_point)
assert (
"Using token for authentication, other credentials will be ignored."
in warn[0].message.args[0]
)
check_call.assert_any_call(
["git", "clone", "https://my-token@github.com/testAccount/private-repo.git", REPO_DIR],
env=env,
)
check_call.assert_any_call(args=["git", "checkout", PRIVATE_BRANCH], cwd=REPO_DIR)
check_call.assert_any_call(args=["git", "checkout", PRIVATE_COMMIT], cwd=REPO_DIR)
assert ret["entry_point"] == "/tmp/repo_dir/entry_point"
assert ret["source_dir"] is None
assert ret["dependencies"] is None
@patch("subprocess.check_call")
@patch("tempfile.mkdtemp", return_value=REPO_DIR)
@patch("os.path.isfile", return_value=True)
def test_git_clone_repo_with_token_2fa_unnecessary_creds_provided(isfile, mkdtemp, check_call):
git_config = {
"repo": PRIVATE_GIT_REPO,
"branch": PRIVATE_BRANCH,
"commit": PRIVATE_COMMIT,
"2FA_enabled": True,
"username": "username",
"token": "my-token",
}
entry_point = "entry_point"
env = os.environ.copy()
env["GIT_TERMINAL_PROMPT"] = "0"
with pytest.warns(UserWarning) as warn:
ret = git_utils.git_clone_repo(git_config=git_config, entry_point=entry_point)
assert (
"Using token for authentication, other credentials will be ignored."
in warn[0].message.args[0]
)
check_call.assert_any_call(
["git", "clone", "https://my-token@github.com/testAccount/private-repo.git", REPO_DIR],
env=env,
)
check_call.assert_any_call(args=["git", "checkout", PRIVATE_BRANCH], cwd=REPO_DIR)
check_call.assert_any_call(args=["git", "checkout", PRIVATE_COMMIT], cwd=REPO_DIR)
assert ret["entry_point"] == "/tmp/repo_dir/entry_point"
assert ret["source_dir"] is None
assert ret["dependencies"] is None
@patch(
"subprocess.check_call",
side_effect=subprocess.CalledProcessError(
returncode=1, cmd="git clone {} {}".format(PRIVATE_GIT_REPO, REPO_DIR)
),
)
@patch("tempfile.mkdtemp", return_value=REPO_DIR)
def test_git_clone_repo_with_username_and_password_wrong_creds(mkdtemp, check_call):
git_config = {
"repo": PRIVATE_GIT_REPO,
"branch": PRIVATE_BRANCH,
"commit": PRIVATE_COMMIT,
"2FA_enabled": False,
"username": "username",
"password": "wrong-password",
}
entry_point = "entry_point"
env = os.environ.copy()
env["GIT_TERMINAL_PROMPT"] = "0"
with pytest.raises(subprocess.CalledProcessError) as error:
git_utils.git_clone_repo(git_config=git_config, entry_point=entry_point)
assert "returned non-zero exit status" in str(error.value)
@patch(
"subprocess.check_call",
side_effect=subprocess.CalledProcessError(
returncode=1, cmd="git clone {} {}".format(PRIVATE_GIT_REPO, REPO_DIR)
),
)
@patch("tempfile.mkdtemp", return_value=REPO_DIR)
def test_git_clone_repo_with_token_wrong_creds(mkdtemp, check_call):
git_config = {
"repo": PRIVATE_GIT_REPO,
"branch": PRIVATE_BRANCH,
"commit": PRIVATE_COMMIT,
"2FA_enabled": False,
"token": "wrong-token",
}
entry_point = "entry_point"
env = os.environ.copy()
env["GIT_TERMINAL_PROMPT"] = "0"
with pytest.raises(subprocess.CalledProcessError) as error:
git_utils.git_clone_repo(git_config=git_config, entry_point=entry_point)
assert "returned non-zero exit status" in str(error.value)
@patch(
"subprocess.check_call",
side_effect=subprocess.CalledProcessError(
returncode=1, cmd="git clone {} {}".format(PRIVATE_GIT_REPO, REPO_DIR)
),
)
@patch("tempfile.mkdtemp", return_value=REPO_DIR)
def test_git_clone_repo_with_and_token_2fa_wrong_creds(mkdtemp, check_call):
git_config = {
"repo": PRIVATE_GIT_REPO,
"branch": PRIVATE_BRANCH,
"commit": PRIVATE_COMMIT,
"2FA_enabled": False,
"token": "wrong-token",
}
entry_point = "entry_point"
env = os.environ.copy()
env["GIT_TERMINAL_PROMPT"] = "0"
with pytest.raises(subprocess.CalledProcessError) as error:
git_utils.git_clone_repo(git_config=git_config, entry_point=entry_point)
assert "returned non-zero exit status" in str(error.value)
@patch("subprocess.check_call")
@patch("tempfile.mkdtemp", return_value=REPO_DIR)
@patch("os.path.isfile", return_value=True)
def test_git_clone_repo_codecommit_https_with_username_and_password(isfile, mkdtemp, check_call):
git_config = {
"repo": CODECOMMIT_REPO,
"branch": CODECOMMIT_BRANCH,
"username": "username",
"password": "my-codecommit-password",
}
entry_point = "entry_point"
env = os.environ.copy()
env["GIT_TERMINAL_PROMPT"] = "0"
ret = git_utils.git_clone_repo(git_config=git_config, entry_point=entry_point)
check_call.assert_any_call(
[
"git",
"clone",
"https://username:my-codecommit-password@git-codecommit.us-west-2.amazonaws.com/v1/repos/test-repo/",
REPO_DIR,
],
env=env,
)
check_call.assert_any_call(args=["git", "checkout", CODECOMMIT_BRANCH], cwd=REPO_DIR)
assert ret["entry_point"] == "/tmp/repo_dir/entry_point"
assert ret["source_dir"] is None
assert ret["dependencies"] is None
@patch(
"subprocess.check_call",
side_effect=subprocess.CalledProcessError(
returncode=128, cmd="git clone {} {}".format(CODECOMMIT_REPO_SSH, REPO_DIR)
),
)
@patch("tempfile.mkdtemp", return_value=REPO_DIR)
def test_git_clone_repo_codecommit_ssh_passphrase_required(mkdtemp, check_call):
git_config = {"repo": CODECOMMIT_REPO_SSH, "branch": CODECOMMIT_BRANCH}
entry_point = "entry_point"
with pytest.raises(subprocess.CalledProcessError) as error:
git_utils.git_clone_repo(git_config, entry_point)
assert "returned non-zero exit status" in str(error.value)
@patch(
"subprocess.check_call",
side_effect=subprocess.CalledProcessError(
returncode=128, cmd="git clone {} {}".format(CODECOMMIT_REPO, REPO_DIR)
),
)
@patch("tempfile.mkdtemp", return_value=REPO_DIR)
def test_git_clone_repo_codecommit_https_creds_not_stored_locally(mkdtemp, check_call):
git_config = {"repo": CODECOMMIT_REPO, "branch": CODECOMMIT_BRANCH}
entry_point = "entry_point"
with pytest.raises(subprocess.CalledProcessError) as error:
git_utils.git_clone_repo(git_config, entry_point)
assert "returned non-zero exit status" in str(error.value)
| 39.628998
| 113
| 0.709405
| 2,495
| 18,586
| 4.979559
| 0.076553
| 0.07244
| 0.040567
| 0.049903
| 0.878783
| 0.869124
| 0.860431
| 0.846829
| 0.831616
| 0.83065
| 0
| 0.007574
| 0.161788
| 18,586
| 468
| 114
| 39.713675
| 0.789909
| 0.029323
| 0
| 0.737226
| 0
| 0.007299
| 0.247337
| 0.052363
| 0
| 0
| 0
| 0
| 0.148418
| 1
| 0.051095
| false
| 0.024331
| 0.014599
| 0
| 0.065693
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9151a0bb7b7d6114baa671021737e15d06beee21
| 5,389
|
py
|
Python
|
src/ppopt/mp_solvers/mpqp_graph.py
|
AnhTran01/PPOPT
|
4f62ee5363100766a7524ca6bbe03ddd64b32b8d
|
[
"MIT"
] | null | null | null |
src/ppopt/mp_solvers/mpqp_graph.py
|
AnhTran01/PPOPT
|
4f62ee5363100766a7524ca6bbe03ddd64b32b8d
|
[
"MIT"
] | null | null | null |
src/ppopt/mp_solvers/mpqp_graph.py
|
AnhTran01/PPOPT
|
4f62ee5363100766a7524ca6bbe03ddd64b32b8d
|
[
"MIT"
] | null | null | null |
from typing import List
from settrie import SetTrie
from .solver_utils import generate_reduce, generate_extra
from ..mpqp_program import MPQP_Program
from ..solution import Solution
from ..utils.constraint_utilities import is_full_rank
from ..utils.mpqp_utils import gen_cr_from_active_set
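# Both solvers below explore candidate active sets in order of increasing
# cardinality. Candidates that are rank-deficient, infeasible, or non-optimal
# are discarded; solve() additionally records infeasible sets in a SetTrie
# (the "murder list") so that related candidates can be filtered out cheaply
# by generate_reduce / generate_extra, while solve_no_murder skips that pruning.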
def graph_initialization(program, initial_active_sets):
"""
Initializes the graph algorithm based on input
:param program:
:param initial_active_sets:
:return:
"""
if initial_active_sets is None:
initial_active_sets = program.sample_theta_space()
# This will contain all of the attempted active sets
attempted = set()
solution = Solution(program, [])
murder_list = SetTrie()
to_attempt = [tuple(a_set) for a_set in initial_active_sets]
to_attempt.append(tuple([]))
to_attempt.extend([tuple([i]) for i in range(len(program.equality_indices), program.num_constraints())])
if len(to_attempt) != 0:
print(f'First region {to_attempt[0]}')
else:
print('Failed to find an initial region!')
return attempted, solution, murder_list, to_attempt
def solve(program: MPQP_Program, initial_active_sets: List[List[int]] = None) -> Solution:
"""
    Solves the MPQP program with a modified algorithm described in Oberdieck et al. (2016)
url: https://www.sciencedirect.com/science/article/pii/S0005109816303971
:param program: MPQP to be solved
:param initial_active_sets: An initial critical region to start this algorithm with, otherwise one will be found
:return: the solution of the MPQP
"""
# TODO: This still misses some Critical Regions. USE Geometric Repair?
attempted, solution, murder_list, to_attempt = graph_initialization(program, initial_active_sets)
while len(to_attempt) > 0:
# make sure I am grabbing from the lowest cardinality
to_attempt.sort(key=len)
# step 1: feasibility
candidate = to_attempt.pop(0)
# print(candidate)
if candidate in attempted:
continue
# print(f'Candidate {candidate}')
attempted.add(candidate)
# checks for infeasible subsets if so break and go to next candidate
if not is_full_rank(program.A, list(candidate)):
to_attempt.extend(generate_reduce(candidate, murder_list, attempted))
murder_list.add(candidate)
continue
if program.check_feasibility(list(candidate)) is None:
to_attempt.extend(generate_reduce(candidate, murder_list, attempted))
murder_list.add(candidate)
# print(f' MURDERED {candidate}')
continue
if not program.check_optimality(list(candidate)):
to_attempt.extend(generate_reduce(candidate, murder_list, attempted))
# not optimal do nothing with this
continue
region = gen_cr_from_active_set(program, list(candidate), check_full_dim=False)
if region is None:
continue
if region.is_full_dimension():
if set(tuple(program.equality_indices)).issuperset(candidate):
continue
solution.add_region(region)
to_attempt.extend(generate_reduce(candidate, murder_list, attempted))
to_attempt.extend(generate_extra(candidate, region.regular_set[1], murder_list, attempted))
return solution
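# Minimal usage sketch (assumes you already have an MPQP_Program instance):
#     program = MPQP_Program(...)   # problem data elided
#     solution = solve(program)     # or solve_no_murder(program)
# The returned Solution holds the full-dimensional critical regions added via
# solution.add_region(...) above.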
def solve_no_murder(program: MPQP_Program, initial_active_sets: List[List[int]] = None) -> Solution:
"""
    Solves the MPQP program with a modified algorithm described in Oberdieck et al. (2016)
url: https://www.sciencedirect.com/science/article/pii/S0005109816303971
:param program: MPQP to be solved
:param initial_active_sets: An initial critical region to start this algorithm with, otherwise one will be found
:return: the solution of the MPQP
"""
# TODO: This still misses some Critical Regions. USE Geometric Repair?
attempted, solution, _, to_attempt = graph_initialization(program, initial_active_sets)
while len(to_attempt) > 0:
# make sure I am grabbing from the lowest cardinality
to_attempt.sort(key=len)
# step 1: feasibility
candidate = to_attempt.pop(0)
# print(candidate)
if candidate in attempted:
continue
attempted.add(candidate)
if not is_full_rank(program.A, list(candidate)):
to_attempt.extend(generate_reduce(candidate, None, attempted))
continue
if program.check_feasibility(list(candidate)) is None:
to_attempt.extend(generate_reduce(candidate, None, attempted))
continue
if not program.check_optimality(list(candidate)):
to_attempt.extend(generate_reduce(candidate, None, attempted))
continue
region = gen_cr_from_active_set(program, list(candidate), check_full_dim=False)
if region is None:
            print('Region construction failed for this candidate; skipping')
continue
if region.is_full_dimension():
if set(tuple(program.equality_indices)).issuperset(candidate):
continue
solution.add_region(region)
to_attempt.extend(generate_reduce(candidate, None, attempted))
to_attempt.extend(generate_extra(candidate, region.regular_set[1], None, attempted))
return solution
| 31.887574
| 116
| 0.68213
| 665
| 5,389
| 5.342857
| 0.215038
| 0.060794
| 0.052632
| 0.064734
| 0.735435
| 0.730369
| 0.705319
| 0.705319
| 0.704194
| 0.701379
| 0
| 0.012171
| 0.237706
| 5,389
| 168
| 117
| 32.077381
| 0.852726
| 0.244387
| 0
| 0.641026
| 1
| 0
| 0.018892
| 0
| 0
| 0
| 0
| 0.011905
| 0
| 1
| 0.038462
| false
| 0
| 0.089744
| 0
| 0.166667
| 0.038462
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e6a0bc3b38c48b4eb6db76a23fda69f44e1110a0
| 15,707
|
py
|
Python
|
src/genie/libs/parser/iosxe/tests/ShowIsisRib/cli/equal/golden_output_5_expected.py
|
ykoehler/genieparser
|
b62cf622c3d8eab77c7b69e932c214ed04a2565a
|
[
"Apache-2.0"
] | null | null | null |
src/genie/libs/parser/iosxe/tests/ShowIsisRib/cli/equal/golden_output_5_expected.py
|
ykoehler/genieparser
|
b62cf622c3d8eab77c7b69e932c214ed04a2565a
|
[
"Apache-2.0"
] | null | null | null |
src/genie/libs/parser/iosxe/tests/ShowIsisRib/cli/equal/golden_output_5_expected.py
|
ykoehler/genieparser
|
b62cf622c3d8eab77c7b69e932c214ed04a2565a
|
[
"Apache-2.0"
] | null | null | null |
expected_output = {
"tag": {
"1": {
"topo_type": "unicast",
"topo_name": "base",
"tid": 0,
"topo_id": "0x0",
"flex_algo": {
"None": {
"prefix": {
"6.6.6.6": {
"prefix_attr": {
"x_flag": False,
"r_flag": False,
"n_flag": True
},
"subnet": "32",
"source_router_id": "6.6.6.6",
"algo": {
0: {
"sid_index": 61,
"bound": True,
"attribute": "SR_POLICY"
},
1: {}
},
"via_interface": {
"Tunnel65536": {
"level": {
"L2": {
"source_ip": {
"6.6.6.6": {
"lsp": {
"next_hop_lsp_index": 115,
"rtp_lsp_index": 115,
"rtp_lsp_version": 220
},
"distance": 115,
"metric": 50,
"via_ip": "6.6.6.6",
"tag": "0",
"filtered_out": False,
"prefix_attr": {
"x_flag": False,
"r_flag": False,
"n_flag": True
},
"source_router_id": "6.6.6.6",
"srgb_start": 100000,
"srgb_range": 30001,
"algo": {
0: {
"sid_index": 61,
"flags": {
"r_flag": False,
"n_flag": True,
"p_flag": False,
"e_flag": False,
"v_flag": False,
"l_flag": False
},
"label": "implicit-null"
}
},
"path_attribute": {
"ALT": False,
"SR_POLICY": True,
"SR_POLICY_STRICT": False,
"SRTE": False,
"SRTE_STRICT": False,
"ULOOP_EP": False,
"TE": False
},
"had_repair_path": False,
"installed": True
}
}
}
}
},
"Tunnel4001": {
"level": {
"L2": {
"source_ip": {
"6.6.6.6": {
"lsp": {
"next_hop_lsp_index": 2,
"rtp_lsp_index": 115,
"rtp_lsp_version": 220
},
"distance": 115,
"metric": 50,
"via_ip": "199.1.1.2",
"tag": "0",
"filtered_out": False,
"srgb_start": 100000,
"srgb_range": 30001,
"algo": {
0: {
"sid_index": 61,
"flags": {
"r_flag": False,
"n_flag": True,
"p_flag": False,
"e_flag": False,
"v_flag": False,
"l_flag": False
},
"label": "100061"
}
},
"path_attribute": {
"ALT": True,
"SR_POLICY": False,
"SR_POLICY_STRICT": False,
"SRTE": False,
"SRTE_STRICT": False,
"ULOOP_EP": False,
"TE": False
},
"had_repair_path": False,
"installed": True
}
}
}
}
},
"Tunnel4002": {
"level": {
"L2": {
"source_ip": {
"6.6.6.6": {
"lsp": {
"next_hop_lsp_index": 2,
"rtp_lsp_index": 115,
"rtp_lsp_version": 220
},
"distance": 115,
"metric": 50,
"via_ip": "199.1.2.2",
"tag": "0",
"filtered_out": False,
"srgb_start": 100000,
"srgb_range": 30001,
"algo": {
0: {
"sid_index": 61,
"flags": {
"r_flag": False,
"n_flag": True,
"p_flag": False,
"e_flag": False,
"v_flag": False,
"l_flag": False
},
"label": "100061"
}
},
"path_attribute": {
"ALT": True,
"SR_POLICY": False,
"SR_POLICY_STRICT": False,
"SRTE": False,
"SRTE_STRICT": False,
"ULOOP_EP": False,
"TE": False
},
"had_repair_path": False,
"installed": True
}
}
}
}
},
"GigabitEthernet0/3/1": {
"level": {
"L2": {
"source_ip": {
"6.6.6.6": {
"lsp": {
"next_hop_lsp_index": 3,
"rtp_lsp_index": 115,
"rtp_lsp_version": 220
},
"distance": 115,
"metric": 50,
"via_ip": "12.12.12.2",
"tag": "0",
"filtered_out": False,
"srgb_start": 100000,
"srgb_range": 30001,
"algo": {
0: {
"sid_index": 61,
"flags": {
"r_flag": False,
"n_flag": True,
"p_flag": False,
"e_flag": False,
"v_flag": False,
"l_flag": False
},
"label": "100061,"
}
},
"path_attribute": {
"ALT": True,
"SR_POLICY": False,
"SR_POLICY_STRICT": False,
"SRTE": False,
"SRTE_STRICT": False,
"ULOOP_EP": False,
"TE": False
},
"had_repair_path": False,
"installed": True,
"repair_path": {
"attributes": {
"DS": True,
"LC": True,
"NP": True,
"PP": True,
"SR": True
},
"ip": "199.1.2.2",
"interface": "Tunnel4002",
"metric": 50,
"stale": False,
"rtp_lsp_index": 115,
"lfa_type": "local LFA",
"algo": {
0: {
"label": "100061"
}
},
"repair_source": {
"host": "asr1k-24"
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
| 62.329365
| 84
| 0.131152
| 506
| 15,707
| 3.8083
| 0.189723
| 0.024909
| 0.024909
| 0.016606
| 0.800727
| 0.774261
| 0.774261
| 0.755579
| 0.755579
| 0.755579
| 0
| 0.078761
| 0.810849
| 15,707
| 252
| 85
| 62.329365
| 0.569842
| 0
| 0
| 0.603175
| 0
| 0
| 0.102177
| 0
| 0
| 0
| 0.000191
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fc12d71eac39a42113bc766cc74802d699c1e12a
| 25,019
|
py
|
Python
|
heatclient/tests/test_common_http.py
|
jasondunsmore/python-heatclient
|
13b4f82835fcf5dd0f206745ae13378796b4a029
|
[
"Apache-2.0"
] | null | null | null |
heatclient/tests/test_common_http.py
|
jasondunsmore/python-heatclient
|
13b4f82835fcf5dd0f206745ae13378796b4a029
|
[
"Apache-2.0"
] | null | null | null |
heatclient/tests/test_common_http.py
|
jasondunsmore/python-heatclient
|
13b4f82835fcf5dd0f206745ae13378796b4a029
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import socket
import requests
import testtools
from heatclient.common import http
from heatclient import exc
from heatclient.tests import fakes
from mox3 import mox
class HttpClientTest(testtools.TestCase):
# Patch os.environ to avoid required auth info.
def setUp(self):
super(HttpClientTest, self).setUp()
self.m = mox.Mox()
self.m.StubOutWithMock(requests, 'request')
self.addCleanup(self.m.UnsetStubs)
self.addCleanup(self.m.ResetAll)
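    # The tests follow mox's record/replay/verify cycle: first record the
    # expected requests.request(...) calls with canned fakes.FakeHTTPResponse
    # objects, then ReplayAll(), exercise http.HTTPClient, and finally
    # VerifyAll() to confirm every recorded call actually happened.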
def test_http_raw_request(self):
headers = {'Content-Type': 'application/octet-stream',
'User-Agent': 'python-heatclient'}
# Record a 200
mock_conn = http.requests.request('GET', 'http://example.com:8004',
allow_redirects=False,
headers=headers)
mock_conn.AndReturn(
fakes.FakeHTTPResponse(
200, 'OK',
{'content-type': 'application/octet-stream'},
''))
# Replay, create client, assert
self.m.ReplayAll()
client = http.HTTPClient('http://example.com:8004')
resp = client.raw_request('GET', '')
self.assertEqual(200, resp.status_code)
self.assertEqual('', ''.join([x for x in resp.content]))
self.m.VerifyAll()
def test_token_or_credentials(self):
# Record a 200
fake200 = fakes.FakeHTTPResponse(
200, 'OK',
{'content-type': 'application/octet-stream'},
'')
# no token or credentials
mock_conn = http.requests.request(
'GET', 'http://example.com:8004',
allow_redirects=False,
headers={'Content-Type': 'application/octet-stream',
'User-Agent': 'python-heatclient'})
mock_conn.AndReturn(fake200)
# credentials
mock_conn = http.requests.request(
'GET', 'http://example.com:8004',
allow_redirects=False,
headers={'Content-Type': 'application/octet-stream',
'User-Agent': 'python-heatclient',
'X-Auth-Key': 'pass',
'X-Auth-User': 'user'})
mock_conn.AndReturn(fake200)
# token suppresses credentials
mock_conn = http.requests.request(
'GET', 'http://example.com:8004',
allow_redirects=False,
headers={'Content-Type': 'application/octet-stream',
'User-Agent': 'python-heatclient',
'X-Auth-Token': 'abcd1234'})
mock_conn.AndReturn(fake200)
# Replay, create client, assert
self.m.ReplayAll()
client = http.HTTPClient('http://example.com:8004')
resp = client.raw_request('GET', '')
self.assertEqual(200, resp.status_code)
client.username = 'user'
client.password = 'pass'
resp = client.raw_request('GET', '')
self.assertEqual(200, resp.status_code)
client.auth_token = 'abcd1234'
resp = client.raw_request('GET', '')
self.assertEqual(200, resp.status_code)
self.m.VerifyAll()
def test_include_pass(self):
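        # With include_pass=True the X-Auth-Key/X-Auth-User headers are kept
        # even after an auth token is set.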
# Record a 200
fake200 = fakes.FakeHTTPResponse(
200, 'OK',
{'content-type': 'application/octet-stream'},
'')
# no token or credentials
mock_conn = http.requests.request(
'GET', 'http://example.com:8004',
allow_redirects=False,
headers={'Content-Type': 'application/octet-stream',
'User-Agent': 'python-heatclient'})
mock_conn.AndReturn(fake200)
# credentials
mock_conn = http.requests.request(
'GET', 'http://example.com:8004',
allow_redirects=False,
headers={'Content-Type': 'application/octet-stream',
'User-Agent': 'python-heatclient',
'X-Auth-Key': 'pass',
'X-Auth-User': 'user'})
mock_conn.AndReturn(fake200)
# token suppresses credentials
mock_conn = http.requests.request(
'GET', 'http://example.com:8004',
allow_redirects=False,
headers={'Content-Type': 'application/octet-stream',
'User-Agent': 'python-heatclient',
'X-Auth-Token': 'abcd1234',
'X-Auth-Key': 'pass',
'X-Auth-User': 'user'})
mock_conn.AndReturn(fake200)
# Replay, create client, assert
self.m.ReplayAll()
client = http.HTTPClient('http://example.com:8004')
resp = client.raw_request('GET', '')
self.assertEqual(200, resp.status_code)
client.username = 'user'
client.password = 'pass'
client.include_pass = True
resp = client.raw_request('GET', '')
self.assertEqual(200, resp.status_code)
client.auth_token = 'abcd1234'
resp = client.raw_request('GET', '')
self.assertEqual(200, resp.status_code)
self.m.VerifyAll()
def test_not_include_pass(self):
# Record a 200
fake500 = fakes.FakeHTTPResponse(
500, 'ERROR',
{'content-type': 'application/octet-stream'},
'(HTTP 401)')
# no token or credentials
mock_conn = http.requests.request(
'GET', 'http://example.com:8004',
allow_redirects=False,
headers={'Content-Type': 'application/octet-stream',
'User-Agent': 'python-heatclient'})
mock_conn.AndReturn(fake500)
# Replay, create client, assert
self.m.ReplayAll()
client = http.HTTPClient('http://example.com:8004')
e = self.assertRaises(exc.HTTPUnauthorized,
client.raw_request, 'GET', '')
self.assertIn('include-password', str(e))
def test_region_name(self):
# Record a 200
fake200 = fakes.FakeHTTPResponse(
200, 'OK',
{'content-type': 'application/octet-stream'},
'')
# Specify region name
mock_conn = http.requests.request(
'GET', 'http://example.com:8004',
allow_redirects=False,
headers={'Content-Type': 'application/octet-stream',
'X-Region-Name': 'RegionOne',
'User-Agent': 'python-heatclient'})
mock_conn.AndReturn(fake200)
# Replay, create client, assert
self.m.ReplayAll()
client = http.HTTPClient('http://example.com:8004')
client.region_name = 'RegionOne'
resp = client.raw_request('GET', '')
self.assertEqual(200, resp.status_code)
self.m.VerifyAll()
def test_http_json_request(self):
# Record a 200
mock_conn = http.requests.request(
'GET', 'http://example.com:8004',
allow_redirects=False,
headers={'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': 'python-heatclient'})
mock_conn.AndReturn(
fakes.FakeHTTPResponse(
200, 'OK',
{'content-type': 'application/json'},
'{}'))
# Replay, create client, assert
self.m.ReplayAll()
client = http.HTTPClient('http://example.com:8004')
resp, body = client.json_request('GET', '')
self.assertEqual(200, resp.status_code)
self.assertEqual({}, body)
self.m.VerifyAll()
def test_http_json_request_argument_passed_to_requests(self):
"""Check that we have sent the proper arguments to requests."""
# Record a 200
mock_conn = http.requests.request(
'GET', 'http://example.com:8004',
allow_redirects=False,
cert=('RANDOM_CERT_FILE', 'RANDOM_KEY_FILE'),
verify=True,
data='"text"',
headers={'Content-Type': 'application/json',
'Accept': 'application/json',
'X-Auth-Url': 'http://AUTH_URL',
'User-Agent': 'python-heatclient'})
mock_conn.AndReturn(
fakes.FakeHTTPResponse(
200, 'OK',
{'content-type': 'application/json'},
'{}'))
# Replay, create client, assert
self.m.ReplayAll()
client = http.HTTPClient('http://example.com:8004')
client.verify_cert = True
client.cert_file = 'RANDOM_CERT_FILE'
client.key_file = 'RANDOM_KEY_FILE'
client.auth_url = 'http://AUTH_URL'
resp, body = client.json_request('GET', '', data='text')
self.assertEqual(200, resp.status_code)
self.assertEqual({}, body)
self.m.VerifyAll()
def test_http_json_request_w_req_body(self):
# Record a 200
mock_conn = http.requests.request(
'GET', 'http://example.com:8004',
body='test-body',
allow_redirects=False,
headers={'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': 'python-heatclient'})
mock_conn.AndReturn(
fakes.FakeHTTPResponse(
200, 'OK',
{'content-type': 'application/json'},
'{}'))
# Replay, create client, assert
self.m.ReplayAll()
client = http.HTTPClient('http://example.com:8004')
resp, body = client.json_request('GET', '', body='test-body')
self.assertEqual(200, resp.status_code)
self.assertEqual({}, body)
self.m.VerifyAll()
def test_http_json_request_non_json_resp_cont_type(self):
# Record a 200
mock_conn = http.requests.request(
'GET', 'http://example.com:8004', body='test-body',
allow_redirects=False,
headers={'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': 'python-heatclient'})
mock_conn.AndReturn(
fakes.FakeHTTPResponse(
200, 'OK',
{'content-type': 'not/json'},
'{}'))
# Replay, create client, assert
self.m.ReplayAll()
client = http.HTTPClient('http://example.com:8004')
resp, body = client.json_request('GET', '', body='test-body')
self.assertEqual(200, resp.status_code)
self.assertIsNone(body)
self.m.VerifyAll()
def test_http_json_request_invalid_json(self):
# Record a 200
mock_conn = http.requests.request(
'GET', 'http://example.com:8004',
allow_redirects=False,
headers={'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': 'python-heatclient'})
mock_conn.AndReturn(
fakes.FakeHTTPResponse(
200, 'OK',
{'content-type': 'application/json'},
'invalid-json'))
# Replay, create client, assert
self.m.ReplayAll()
client = http.HTTPClient('http://example.com:8004')
resp, body = client.json_request('GET', '')
self.assertEqual(200, resp.status_code)
self.assertEqual('invalid-json', body)
self.m.VerifyAll()
def test_http_manual_redirect_delete(self):
mock_conn = http.requests.request(
'DELETE', 'http://example.com:8004/foo',
allow_redirects=False,
headers={'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': 'python-heatclient'})
mock_conn.AndReturn(
fakes.FakeHTTPResponse(
302, 'Found',
{'location': 'http://example.com:8004/foo/bar'},
''))
mock_conn = http.requests.request(
'DELETE', 'http://example.com:8004/foo/bar',
allow_redirects=False,
headers={'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': 'python-heatclient'})
mock_conn.AndReturn(
fakes.FakeHTTPResponse(
200, 'OK',
{'content-type': 'application/json'},
'{}'))
self.m.ReplayAll()
client = http.HTTPClient('http://example.com:8004/foo')
resp, body = client.json_request('DELETE', '')
self.assertEqual(200, resp.status_code)
self.m.VerifyAll()
def test_http_manual_redirect_post(self):
mock_conn = http.requests.request(
'POST', 'http://example.com:8004/foo',
allow_redirects=False,
headers={'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': 'python-heatclient'})
mock_conn.AndReturn(
fakes.FakeHTTPResponse(
302, 'Found',
{'location': 'http://example.com:8004/foo/bar'},
''))
mock_conn = http.requests.request(
'POST', 'http://example.com:8004/foo/bar',
allow_redirects=False,
headers={'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': 'python-heatclient'})
mock_conn.AndReturn(
fakes.FakeHTTPResponse(
200, 'OK',
{'content-type': 'application/json'},
'{}'))
self.m.ReplayAll()
client = http.HTTPClient('http://example.com:8004/foo')
resp, body = client.json_request('POST', '')
self.assertEqual(200, resp.status_code)
self.m.VerifyAll()
def test_http_manual_redirect_put(self):
mock_conn = http.requests.request(
'PUT', 'http://example.com:8004/foo',
allow_redirects=False,
headers={'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': 'python-heatclient'})
mock_conn.AndReturn(
fakes.FakeHTTPResponse(
302, 'Found',
{'location': 'http://example.com:8004/foo/bar'},
''))
mock_conn = http.requests.request(
'PUT', 'http://example.com:8004/foo/bar',
allow_redirects=False,
headers={'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': 'python-heatclient'})
mock_conn.AndReturn(
fakes.FakeHTTPResponse(
200, 'OK',
{'content-type': 'application/json'},
'{}'))
self.m.ReplayAll()
client = http.HTTPClient('http://example.com:8004/foo')
resp, body = client.json_request('PUT', '')
self.assertEqual(200, resp.status_code)
self.m.VerifyAll()
def test_http_manual_redirect_prohibited(self):
mock_conn = http.requests.request(
'DELETE', 'http://example.com:8004/foo',
allow_redirects=False,
headers={'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': 'python-heatclient'})
mock_conn.AndReturn(
fakes.FakeHTTPResponse(
302, 'Found',
{'location': 'http://example.com:8004/'},
''))
self.m.ReplayAll()
client = http.HTTPClient('http://example.com:8004/foo')
self.assertRaises(exc.InvalidEndpoint,
client.json_request, 'DELETE', '')
self.m.VerifyAll()
def test_http_manual_redirect_error_without_location(self):
mock_conn = http.requests.request(
'DELETE', 'http://example.com:8004/foo',
allow_redirects=False,
headers={'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': 'python-heatclient'})
mock_conn.AndReturn(
fakes.FakeHTTPResponse(
302, 'Found',
{},
''))
self.m.ReplayAll()
client = http.HTTPClient('http://example.com:8004/foo')
self.assertRaises(exc.InvalidEndpoint,
client.json_request, 'DELETE', '')
self.m.VerifyAll()
def test_http_json_request_redirect(self):
# Record the 302
mock_conn = http.requests.request(
'GET', 'http://example.com:8004',
allow_redirects=False,
headers={'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': 'python-heatclient'})
mock_conn.AndReturn(
fakes.FakeHTTPResponse(
302, 'Found',
{'location': 'http://example.com:8004'},
''))
# Record the following 200
mock_conn = http.requests.request(
'GET', 'http://example.com:8004',
allow_redirects=False,
headers={'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': 'python-heatclient'})
mock_conn.AndReturn(
fakes.FakeHTTPResponse(
200, 'OK',
{'content-type': 'application/json'},
'{}'))
# Replay, create client, assert
self.m.ReplayAll()
client = http.HTTPClient('http://example.com:8004')
resp, body = client.json_request('GET', '')
self.assertEqual(resp.status_code, 200)
self.assertEqual(body, {})
self.m.VerifyAll()
def test_http_404_json_request(self):
# Record a 404
mock_conn = http.requests.request(
'GET', 'http://example.com:8004',
allow_redirects=False,
headers={'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': 'python-heatclient'})
mock_conn.AndReturn(
fakes.FakeHTTPResponse(
404, 'OK', {'content-type': 'application/json'},
'{}'))
# Replay, create client, assert
self.m.ReplayAll()
client = http.HTTPClient('http://example.com:8004')
e = self.assertRaises(exc.HTTPNotFound, client.json_request, 'GET', '')
# Assert that the raised exception can be converted to string
self.assertIsNotNone(str(e))
self.m.VerifyAll()
def test_http_300_json_request(self):
# Record a 300
mock_conn = http.requests.request(
'GET', 'http://example.com:8004',
allow_redirects=False,
headers={'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': 'python-heatclient'})
mock_conn.AndReturn(
fakes.FakeHTTPResponse(
300, 'OK', {'content-type': 'application/json'},
'{}'))
# Replay, create client, assert
self.m.ReplayAll()
client = http.HTTPClient('http://example.com:8004')
e = self.assertRaises(
exc.HTTPMultipleChoices, client.json_request, 'GET', '')
# Assert that the raised exception can be converted to string
self.assertIsNotNone(str(e))
self.m.VerifyAll()
def test_fake_json_request(self):
headers = {'User-Agent': 'python-heatclient'}
mock_conn = http.requests.request('GET', 'fake://example.com:8004/',
allow_redirects=False,
headers=headers)
mock_conn.AndRaise(socket.gaierror)
self.m.ReplayAll()
client = http.HTTPClient('fake://example.com:8004')
self.assertRaises(exc.InvalidEndpoint,
client._http_request, "/", "GET")
self.m.VerifyAll()
def test_debug_curl_command(self):
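        # The client logs an equivalent curl command line for each request;
        # verify the SSL params map to --key/--cert/--cacert and insecure to -k.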
self.m.StubOutWithMock(logging.Logger, 'debug')
ssl_connection_params = {'ca_file': 'TEST_CA',
'cert_file': 'TEST_CERT',
'key_file': 'TEST_KEY',
'insecure': 'TEST_NSA'}
headers = {'key': 'value'}
mock_logging_debug = logging.Logger.debug(
"curl -i -X GET -H 'key: value' --key TEST_KEY "
"--cert TEST_CERT --cacert TEST_CA "
"-k -d 'text' http://foo/bar"
)
mock_logging_debug.AndReturn(None)
self.m.ReplayAll()
client = http.HTTPClient('http://foo')
client.ssl_connection_params = ssl_connection_params
client.log_curl_request('GET', '/bar', {'headers': headers,
'data': 'text'})
self.m.VerifyAll()
def test_http_request_socket_error(self):
headers = {'User-Agent': 'python-heatclient'}
mock_conn = http.requests.request('GET', 'http://example.com:8004/',
allow_redirects=False,
headers=headers)
mock_conn.AndRaise(socket.error)
self.m.ReplayAll()
client = http.HTTPClient('http://example.com:8004')
self.assertRaises(exc.CommunicationError,
client._http_request, "/", "GET")
self.m.VerifyAll()
def test_http_request_socket_timeout(self):
headers = {'User-Agent': 'python-heatclient'}
mock_conn = http.requests.request('GET', 'http://example.com:8004/',
allow_redirects=False,
headers=headers)
mock_conn.AndRaise(socket.timeout)
self.m.ReplayAll()
client = http.HTTPClient('http://example.com:8004')
self.assertRaises(exc.CommunicationError,
client._http_request, "/", "GET")
self.m.VerifyAll()
def test_http_request_specify_timeout(self):
mock_conn = http.requests.request(
'GET', 'http://example.com:8004',
allow_redirects=False,
headers={'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': 'python-heatclient'},
timeout=float(123))
mock_conn.AndReturn(
fakes.FakeHTTPResponse(
200, 'OK',
{'content-type': 'application/json'},
'{}'))
# Replay, create client, assert
self.m.ReplayAll()
client = http.HTTPClient('http://example.com:8004', timeout='123')
resp, body = client.json_request('GET', '')
self.assertEqual(200, resp.status_code)
self.assertEqual({}, body)
self.m.VerifyAll()
def test_get_system_ca_file(self):
chosen = '/etc/ssl/certs/ca-certificates.crt'
self.m.StubOutWithMock(os.path, 'exists')
os.path.exists(chosen).AndReturn(chosen)
self.m.ReplayAll()
ca = http.get_system_ca_file()
self.assertEqual(ca, chosen)
self.m.VerifyAll()
def test_insecure_verify_cert_None(self):
client = http.HTTPClient('https://foo', insecure=True)
self.assertFalse(client.verify_cert)
def test_passed_cert_to_verify_cert(self):
client = http.HTTPClient('https://foo', ca_file="NOWHERE")
self.assertEqual(client.verify_cert, "NOWHERE")
self.m.StubOutWithMock(http, 'get_system_ca_file')
http.get_system_ca_file().AndReturn("SOMEWHERE")
self.m.ReplayAll()
client = http.HTTPClient('https://foo')
self.assertEqual(client.verify_cert, "SOMEWHERE")
def test_curl_log_i18n_headers(self):
self.m.StubOutWithMock(logging.Logger, 'debug')
kwargs = {'headers': {'Key': 'foo\xe3\x8a\x8e'}}
mock_logging_debug = logging.Logger.debug(
u"curl -i -X GET -H 'Key: foo㊎' http://somewhere"
)
mock_logging_debug.AndReturn(None)
self.m.ReplayAll()
client = http.HTTPClient('http://somewhere')
client.log_curl_request("GET", '', kwargs=kwargs)
self.m.VerifyAll()
| 38.313936
| 79
| 0.549782
| 2,498
| 25,019
| 5.383907
| 0.09968
| 0.03569
| 0.059335
| 0.073611
| 0.834709
| 0.805339
| 0.780132
| 0.773589
| 0.758867
| 0.748755
| 0
| 0.027521
| 0.31596
| 25,019
| 652
| 80
| 38.372699
| 0.758268
| 0.062153
| 0
| 0.752852
| 0
| 0
| 0.217926
| 0.017815
| 0
| 0
| 0
| 0
| 0.077947
| 1
| 0.053232
| false
| 0.020913
| 0.01711
| 0
| 0.072243
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fc65a59d3087377556971ae3d9465b2618251ffa
| 18,041
|
py
|
Python
|
Code/GetBusDatafromDB.py
|
gzyszuuow/BusBunching
|
e9b01a62e422ae6525d9287bdf46c25aacdf9c6c
|
[
"Apache-2.0"
] | null | null | null |
Code/GetBusDatafromDB.py
|
gzyszuuow/BusBunching
|
e9b01a62e422ae6525d9287bdf46c25aacdf9c6c
|
[
"Apache-2.0"
] | null | null | null |
Code/GetBusDatafromDB.py
|
gzyszuuow/BusBunching
|
e9b01a62e422ae6525d9287bdf46c25aacdf9c6c
|
[
"Apache-2.0"
] | null | null | null |
import sqlite3
from sqlite3 import Error
import pandas as pd
import time
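# Pull smartcard tap records for selected bus routes from the daily
# Feb/Mar 2016 tables of an SQLite database and write one CSV per route.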
#busid = 343
def create_connection(db_file,busid):
""" create a database connection to a SQLite database """
conn = None
try:
conn = sqlite3.connect(db_file)
cursor = conn.cursor()
        # Build the UNION ALL query over every daily table programmatically
        # instead of repeating the same SELECT by hand. Note: the original
        # hand-written query listed Mar2016_30 twice (possibly intending
        # Mar2016_31), which duplicated that day's rows; each table is now
        # queried exactly once.
        columns = ("CIN, CARD_TYP_CD, ROUTE_ID, BUS_ID, OPRTR_ID, RUN_DIR_CD, "
                   "TRIP_ID, JS_STRT_DT_FK, TAG1_TM, TAG1_TS_NUM, TAG1_TS_NM, "
                   "TAG1_LAT_VAL, TAG1_LONG_VAL, TAG2_TM, TAG2_TS_NUM, "
                   "TAG2_TS_NM, TAG2_LAT_VAL, TAG2_LONG_VAL")
        tables = (["Feb2016_%02d" % day for day in range(1, 30)] +
                  ["Mar2016_%02d" % day for day in range(1, 31)])
        query = " union all ".join(
            "SELECT %s FROM %s WHERE ROUTE_ID = :ID" % (columns, table)
            for table in tables)
        cursor.execute(query, {"ID": busid})
        values = cursor.fetchall()
        names = ["CIN", "CARD_TYP_CD", "ROUTE_ID", "BUS_ID", "OPRTR_ID",
                 "RUN_DIR_CD", "TRIP_ID", "JS_STRT_DT_FK", "TAG1_TM",
                 "TAG1_TS_NUM", "TAG1_TS_NM", "TAG1_LAT_VAL", "TAG1_LONG_VAL",
                 "TAG2_TM", "TAG2_TS_NUM", "TAG2_TS_NM", "TAG2_LAT_VAL",
                 "TAG2_LONG_VAL"]
        values_csv = pd.DataFrame(columns=names, data=values)
        values_csv.to_csv("C:\\Users\\bdu\\Desktop\\gzy\\BusBunching\\BusData\\"
                          + str(busid) + ".csv")
print(busid)
print(values_csv)
print("--------------------------------")
print()
except Error as e:
print(e)
finally:
if conn:
conn.close()
if __name__ == '__main__':
start = time.time()
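    # Route IDs to export; create_connection writes <busid>.csv for each.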
busids = [326,327,386,387,369,397,399,412]
for busid in busids:
create_connection(r"C:\D\Opal2016FebMar\opal4MA.db",busid)
end = time.time()
print(end-start)
| 168.607477
| 291
| 0.759991
| 3,602
| 18,041
| 3.276513
| 0.034703
| 0.072361
| 0.062023
| 0.072361
| 0.950347
| 0.950347
| 0.949161
| 0.946873
| 0.946873
| 0.946873
| 0
| 0.065479
| 0.144172
| 18,041
| 107
| 292
| 168.607477
| 0.698899
| 0.00909
| 0
| 0
| 0
| 0.666667
| 0.897934
| 0.006418
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011111
| false
| 0
| 0.044444
| 0
| 0.055556
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
5dc6d396f3cb22ee822eb9b23a50aca1efb8ae0e
| 5,934
|
py
|
Python
|
download.py
|
superChoi7/DeepJ
|
b4d0964079daf9ce5fb15aface3cc0efcc4a01bf
|
[
"MIT"
] | 1
|
2021-12-31T19:08:16.000Z
|
2021-12-31T19:08:16.000Z
|
download.py
|
superChoi7/DeepJ
|
b4d0964079daf9ce5fb15aface3cc0efcc4a01bf
|
[
"MIT"
] | null | null | null |
download.py
|
superChoi7/DeepJ
|
b4d0964079daf9ce5fb15aface3cc0efcc4a01bf
|
[
"MIT"
] | 1
|
2022-02-28T00:42:50.000Z
|
2022-02-28T00:42:50.000Z
|
import os
from pathlib import Path
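# Output directories (grouped by era) and the piano-midi.de URLs to fetch
# into them; the two structures are index-aligned, so dataurls[i][j] is
# downloaded into styles[i][j].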
styles = [
[
'data/classical/beethoven',
'data/classical/haydn',
'data/classical/mozart'
],
[
'data/romantic/borodin',
'data/romantic/brahms',
'data/romantic/tschai'
]
]
dataurls = [
[
[
'http://www.piano-midi.de/midis/beethoven/beethoven_opus10_1_format0.mid',
'http://www.piano-midi.de/midis/beethoven/beethoven_opus10_2_format0.mid',
'http://www.piano-midi.de/midis/beethoven/beethoven_opus10_3_format0.mid',
'http://www.piano-midi.de/midis/beethoven/pathetique_1_format0.mid',
'http://www.piano-midi.de/midis/beethoven/pathetique_2_format0.mid',
'http://www.piano-midi.de/midis/beethoven/pathetique_3_format0.mid',
'http://www.piano-midi.de/midis/beethoven/beethoven_opus22_1_format0.mid',
'http://www.piano-midi.de/midis/beethoven/beethoven_opus22_2_format0.mid',
'http://www.piano-midi.de/midis/beethoven/beethoven_opus22_3_format0.mid',
'http://www.piano-midi.de/midis/beethoven/beethoven_hammerklavier_1_format0.mid',
'http://www.piano-midi.de/midis/beethoven/beethoven_hammerklavier_2_format0.mid',
'http://www.piano-midi.de/midis/beethoven/beethoven_hammerklavier_3_format0.mid'
],
[
'http://www.piano-midi.de/midis/haydn/haydn_7_1_format0.mid',
'http://www.piano-midi.de/midis/haydn/haydn_7_2_format0.mid',
'http://www.piano-midi.de/midis/haydn/haydn_7_3_format0.mid',
'http://www.piano-midi.de/midis/haydn/haydn_8_1_format0.mid',
'http://www.piano-midi.de/midis/haydn/haydn_8_2_format0.mid',
'http://www.piano-midi.de/midis/haydn/haydn_8_3_format0.mid',
'http://www.piano-midi.de/midis/haydn/haydn_8_4_format0.mid',
'http://www.piano-midi.de/midis/haydn/haydn_9_1_format0.mid',
'http://www.piano-midi.de/midis/haydn/haydn_9_2_format0.mid',
'http://www.piano-midi.de/midis/haydn/haydn_9_3_format0.mid',
'http://www.piano-midi.de/midis/haydn/haydn_43_1_format0.mid',
'http://www.piano-midi.de/midis/haydn/haydn_43_2_format0.mid',
'http://www.piano-midi.de/midis/haydn/haydn_43_3_format0.mid'
],
[
'http://www.piano-midi.de/midis/mozart/mz_311_1_format0.mid',
'http://www.piano-midi.de/midis/mozart/mz_311_2_format0.mid',
'http://www.piano-midi.de/midis/mozart/mz_311_3_format0.mid',
'http://www.piano-midi.de/midis/mozart/mz_330_1_format0.mid',
'http://www.piano-midi.de/midis/mozart/mz_330_2_format0.mid',
'http://www.piano-midi.de/midis/mozart/mz_330_3_format0.mid',
'http://www.piano-midi.de/midis/mozart/mz_331_1_format0.mid',
'http://www.piano-midi.de/midis/mozart/mz_331_2_format0.mid',
'http://www.piano-midi.de/midis/mozart/mz_331_3_format0.mid',
'http://www.piano-midi.de/midis/mozart/mz_570_1_format0.mid',
'http://www.piano-midi.de/midis/mozart/mz_570_2_format0.mid',
'http://www.piano-midi.de/midis/mozart/mz_570_3_format0.mid'
]
],
[
[
'http://www.piano-midi.de/midis/borodin/bor_ps1_format0.mid',
'http://www.piano-midi.de/midis/borodin/bor_ps2_format0.mid',
'http://www.piano-midi.de/midis/borodin/bor_ps3_format0.mid',
'http://www.piano-midi.de/midis/borodin/bor_ps1_format4.mid',
'http://www.piano-midi.de/midis/borodin/bor_ps1_format5.mid',
'http://www.piano-midi.de/midis/borodin/bor_ps6_format0.mid',
'http://www.piano-midi.de/midis/borodin/bor_ps7_format0.mid'
],
[
'http://www.piano-midi.de/midis/brahms/brahms_opus1_1_format0.mid',
'http://www.piano-midi.de/midis/brahms/brahms_opus1_2_format0.mid',
'http://www.piano-midi.de/midis/brahms/brahms_opus1_3_format0.mid',
'http://www.piano-midi.de/midis/brahms/brahms_opus1_4_format0.mid',
'http://www.piano-midi.de/midis/brahms/br_im2_format0.mid',
'http://www.piano-midi.de/midis/brahms/br_im5_format0.mid',
'http://www.piano-midi.de/midis/brahms/br_im6_format0.mid',
'http://www.piano-midi.de/midis/brahms/brahms_opus117_1_format0.mid',
'http://www.piano-midi.de/midis/brahms/brahms_opus117_2_format0.mid',
'http://www.piano-midi.de/midis/brahms/br_rhap_format0.mid'
],
[
'http://www.piano-midi.de/midis/tchaikovsky/ty_januar_format0.mid',
'http://www.piano-midi.de/midis/tchaikovsky/ty_februar_format0.mid',
'http://www.piano-midi.de/midis/tchaikovsky/ty_maerz_format0.mid',
'http://www.piano-midi.de/midis/tchaikovsky/ty_april_format0.mid',
'http://www.piano-midi.de/midis/tchaikovsky/ty_mai_format0.mid',
'http://www.piano-midi.de/midis/tchaikovsky/ty_juni_format0.mid',
'http://www.piano-midi.de/midis/tchaikovsky/ty_juli_format0.mid',
'http://www.piano-midi.de/midis/tchaikovsky/ty_august_format0.mid',
'http://www.piano-midi.de/midis/tchaikovsky/ty_september_format0.mid',
'http://www.piano-midi.de/midis/tchaikovsky/ty_oktober_format0.mid',
'http://www.piano-midi.de/midis/tchaikovsky/ty_november_format0.mid',
'http://www.piano-midi.de/midis/tchaikovsky/ty_dezember_format0.mid'
]
]
]
def downloadDataset(styles, dataurls):
    for i, style in enumerate(styles):
        for j, directory in enumerate(style):  # 'directory' avoids shadowing the builtin dir()
            Path(directory).mkdir(parents=True, exist_ok=True)
            for url in dataurls[i][j]:
                os.system('wget -q {} -P {}'.format(url, directory))
if __name__ == '__main__':
downloadDataset(styles, dataurls)
| 53.459459
| 93
| 0.640546
| 836
| 5,934
| 4.330144
| 0.111244
| 0.127624
| 0.218785
| 0.291713
| 0.867956
| 0.867956
| 0.867956
| 0.867956
| 0.867956
| 0.843923
| 0
| 0.040538
| 0.197674
| 5,934
| 111
| 94
| 53.459459
| 0.719807
| 0
| 0
| 0.056604
| 0
| 0
| 0.715249
| 0.01112
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009434
| false
| 0
| 0.018868
| 0
| 0.028302
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
b90baf449a9c0715bd6069725c242ab26c1c70f3
| 3,623
|
py
|
Python
|
src/NN.py
|
QuentinVecchio/KTH-ANN-Lab1
|
43c90d728b6ec962f31a04e2d0b83bd96a7927ef
|
[
"MIT"
] | null | null | null |
src/NN.py
|
QuentinVecchio/KTH-ANN-Lab1
|
43c90d728b6ec962f31a04e2d0b83bd96a7927ef
|
[
"MIT"
] | null | null | null |
src/NN.py
|
QuentinVecchio/KTH-ANN-Lab1
|
43c90d728b6ec962f31a04e2d0b83bd96a7927ef
|
[
"MIT"
] | null | null | null |
import numpy as np
class SingleLayerNN():
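    """Single-layer linear classifier trained with the delta rule."""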
def __init__(self, lr=0.0001, nb_eboch=20, batch_size=20):
self.batch_size = batch_size
self.lr = lr
self.nb_eboch = nb_eboch
self.W = []
def TLU(self, Y):
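        """Threshold logic unit: map each net input to +1 or -1."""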
output = []
for y in Y[0]:
if (y > 0):
output.append(1)
else:
output.append(-1)
return output
def fit(self, X, T): # X = (len(X[0]) + 1, n)
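        # Append a constant row of ones so the last weight acts as the bias.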
X = np.vstack([X, [1] * len(X[0])])
WHistory = []
eHistory = []
self.W = np.reshape(np.random.normal(0, 0.1, len(X)), (1, len(X)))
WHistory.append(self.W)
for step in range(self.nb_eboch):
p = np.random.permutation(len(X[0]))
X = X.T[p].T
T = T[p]
            batchIndex_list = []
            if self.batch_size == -1:
                batchIndex_list.append([0, len(X[0])])
else:
for i in range(int((len(X[0]) * 1.0) / self.batch_size)):
batchIndex_list.append([i * self.batch_size, (i + 1) * self.batch_size])
for batchIndex in batchIndex_list:
start, end = batchIndex
batch = X.T[start : end].T
WX = np.dot(self.W, batch)# Prediction : (1, len(X) + 1) * (len(X) + 1, n) =(1, n)
aux = T.T[start:end].T - WX
e = T.T[start:end].T - self.TLU(WX)
diff = self.lr * np.dot(aux, batch.T)
self.W = self.W + diff
eHistory.append(np.mean(abs(e/2)))
WHistory.append(self.W)
WX = np.dot(self.W, X)
e = T - self.TLU(WX)
eHistory.append(np.mean(abs(e/2)))
return WHistory, eHistory
def predict(self, X):
X = np.vstack([X, [1] * len(X[0])])
return self.TLU(np.dot(self.W, X))
class Perceptron():
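    """Single-layer classifier trained with the perceptron learning rule."""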
def __init__(self, lr=0.0001, nb_eboch=200, batch_size=20):
self.batch_size = batch_size
self.lr = lr
self.nb_eboch = nb_eboch
self.W = []
def TLU(self, Y):
output = []
for y in Y[0]:
if (y > 0):
output.append(1)
else:
output.append(-1)
return output
def fit(self, X, T): # X = (len(X[0]) + 1, n)
X = np.vstack([X, [1] * len(X[0])])
WHistory = []
eHistory = []
self.W = np.reshape(np.random.normal(0, 0.1, len(X)), (1, len(X)))
WHistory.append(self.W)
for step in range(self.nb_eboch):
p = np.random.permutation(len(X[0]))
X = X.T[p].T
T = T[p]
batchIndex_list = []
if (self.batch_size == -1):
batchIndex_list.append([0, len(X[0])])
else:
for i in range(int((len(X[0]) * 1.0) / self.batch_size)):
batchIndex_list.append([i * self.batch_size, (i + 1) * self.batch_size])
for batchIndex in batchIndex_list:
start, end = batchIndex
batch = X.T[start: end].T
WX = np.dot(self.W, batch) # Prediction : (1, len(X[0]) + 1) * (len(X[0]) + 1, n) =(1, n)
e = T.T[start:end].T - self.TLU(WX)
diff = self.lr * np.dot(e, batch.T)
self.W = self.W + diff
eHistory.append(np.mean(abs(e/2)))
WHistory.append(self.W)
return WHistory, eHistory
def predict(self, X):
X = np.vstack([X, [1] * len(X[0])])
return self.TLU(np.dot(self.W, X))
| 32.061947
| 106
| 0.464532
| 514
| 3,623
| 3.200389
| 0.122568
| 0.048632
| 0.042553
| 0.025532
| 0.944073
| 0.922796
| 0.919757
| 0.904559
| 0.874164
| 0.874164
| 0
| 0.033673
| 0.377036
| 3,623
| 112
| 107
| 32.348214
| 0.695171
| 0.044438
| 0
| 0.880435
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.01087
| 0
| 0.184783
| 0.01087
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5d2e8fa4aa61f072c97e24c138e7427d9b98c705
| 47
|
py
|
Python
|
resources/__init__.py
|
AndersSpringborg/knox-d
|
6591f25b210d16c20bfd64e528b9a5a2d3e15d1d
|
[
"MIT"
] | 1
|
2020-09-27T11:54:34.000Z
|
2020-09-27T11:54:34.000Z
|
resources/__init__.py
|
AndersSpringborg/knox-d
|
6591f25b210d16c20bfd64e528b9a5a2d3e15d1d
|
[
"MIT"
] | 9
|
2020-10-28T13:04:56.000Z
|
2020-12-14T12:10:15.000Z
|
resources/__init__.py
|
AndersSpringborg/knox-d
|
6591f25b210d16c20bfd64e528b9a5a2d3e15d1d
|
[
"MIT"
] | null | null | null |
from . import error
from . import knox_triples
| 15.666667
| 26
| 0.787234
| 7
| 47
| 5.142857
| 0.714286
| 0.555556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170213
| 47
| 2
| 27
| 23.5
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
5d3244aa9ba876cb368f0bd48999e75406483d69
| 134,339
|
py
|
Python
|
Python/generateFeatureControllers.py
|
TXSTDroneResearch/libARController
|
f287a78c478af81757d9f1b930f15ee75a49cb64
|
[
"BSD-3-Clause"
] | null | null | null |
Python/generateFeatureControllers.py
|
TXSTDroneResearch/libARController
|
f287a78c478af81757d9f1b930f15ee75a49cb64
|
[
"BSD-3-Clause"
] | null | null | null |
Python/generateFeatureControllers.py
|
TXSTDroneResearch/libARController
|
f287a78c478af81757d9f1b930f15ee75a49cb64
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
'''
Copyright (C) 2014 Parrot SA
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Parrot nor the names
of its contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
'''
import sys
import os
import re
import arsdkparser
MYDIR=os.path.abspath(os.path.dirname(__file__))
PACKAGES_DIR=os.path.realpath(os.path.join(MYDIR, "../.."))
sys.path.append('%(PACKAGES_DIR)s/ARSDKBuildUtils/Utils/Python' % locals())
sys.path.append('%(PACKAGES_DIR)s/libARCommands/Tools' % locals())
from ARFuncs import *
from libARCommandsgen import *
from ARControllerUtils import *
from arsdkparser import *
_LIST_FLAG = 'list_flags'
CTRL_FTR_H_NAME = 'ARCONTROLLER_Feature.h'
CTRL_FTR_PRIV_H_NAME = 'ARCONTROLLER_Feature.h'
CTRL_FTR_C_NAME = 'ARCONTROLLER_Feature.c'
CTRL_FTR_JNI_C_NAME = 'ARCONTROLLER_JNI_Features.c'
def getGenericListFlagsEnum(ctx):
ftr_gen = ctx.featuresByName['generic']
return ftr_gen.enumsByName[_LIST_FLAG]
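# Generator helpers that split messages on whether any argument is an
# ArMultiSetting.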
def msgs_without_multiset(msgs):
for msg in msgs:
if not [argx for argx in msg.args if isinstance(argx.argType, ArMultiSetting)]:
yield msg
def msgs_with_multiset(msgs):
for msg in msgs:
if [argx for argx in msg.args if isinstance(argx.argType, ArMultiSetting)]:
yield msg
def generateFeatureControllers (ctx, SRC_DIR, INC_DIR):
allFeatures = ctx.features
ARPrint ('generateFeatureControllers ...')
#########################################
# Write Feature controller header file #
#########################################
#for feature in allFeatures: # see automake all source of folder !!!!
#className = ARTypeName (MODULE_FEATURE, get_ftr_old_name(feature), '') # see automake all source of folder !!!!
includeDefine = '_' + MODULE_FEATURE + '_H_' #includeDefine = '_' + ARMacroName (MODULE_FEATURE, get_ftr_old_name(feature), 'H') + '_' # see automake all source of folder !!!!
headerFileName = CTRL_FTR_H_NAME #headerFileName = className + '.h'!!!!
filepath = INC_DIR + headerFileName
hfile = open (filepath, 'w')
hfile.write ('/**********************************************************\n')
hfile.write (' * AUTOGENERATED FILE *\n')
hfile.write (' * DO NOT MODIFY IT *\n')
hfile.write (' * *\n')
hfile.write (' * To add new commands : *\n')
hfile.write (' * - Modify ../Xml/commands.xml file *\n')
hfile.write (' * - Re-run generateFeatureControllers.py script *\n')
hfile.write (' * *\n')
hfile.write (' **********************************************************/\n')
hfile.write ('\n')
hfile.write ('/**\n')
hfile.write ('* @file '+headerFileName+'\n')
hfile.write ('* @brief Feature controller allowing to send commands related to a Feature.\n') #hfile.write ('* @brief Feature controller allowing to send commands related to the '+get_ftr_old_name(feature)+' Feature.\n')
hfile.write ('*/\n')
hfile.write ('\n')
hfile.write ('#ifndef '+includeDefine+'\n')
hfile.write ('#define '+includeDefine+'\n')
hfile.write ('\n')
hfile.write ('#include <stdlib.h>\n')
hfile.write ('\n')
hfile.write ('#include <libARSAL/ARSAL_Print.h>\n')
hfile.write ('#include <libARSAL/ARSAL_Mutex.h>\n')
hfile.write ('#include <libARCommands/ARCommands.h>\n')
hfile.write ('#include <uthash/uthash.h>\n')
hfile.write ('\n')
hfile.write ('#include <libARController/ARCONTROLLER_Error.h>\n')
hfile.write ('#include <libARController/ARCONTROLLER_Network.h>\n')
hfile.write ('#include <libARController/ARCONTROLLER_DICTIONARY_Key.h>\n')
hfile.write ('#include <libARController/ARCONTROLLER_Dictionary.h>\n')
hfile.write ('\n')
for feature in ctx.features: # see automake all source of folder !!!!!!!!!!!!!!
className = ARTypeName (MODULE_FEATURE, get_ftr_old_name(feature), '') # see automake all source of folder !!!!
classPrivName = ARTypeName (MODULE_FEATURE, get_ftr_old_name(feature), 'Private') # see automake all source of folder !!!!
hfile.write ('/*******************************\n') # see automake all source of folder !!!!!!!!
hfile.write (' * --- FEATURE '+get_ftr_old_name(feature)+' --- \n') # see automake all source of folder !!!!!!!!
hfile.write (' ******************************/\n') # see automake all source of folder !!!!!!!!
hfile.write ('\n')
hfile.write ('/**\n')
hfile.write (' * @brief Private part of '+className+'.\n')
hfile.write (' */\n')
hfile.write ('typedef struct '+classPrivName+' '+classPrivName+';\n')
hfile.write ('\n')
hfile.write ('/**\n')
        hfile.write (' * @brief Feature controller allowing to send commands related to the '+get_ftr_old_name(feature)+' Feature.\n')
hfile.write (' * ' + feature.doc.replace('\n', '\n * ')+'\n')
hfile.write (' */\n')
hfile.write ('typedef struct '+className+' '+className+';\n')
hfile.write ('\n')
hfile.write ('/**\n')
hfile.write (' * @brief Create a new '+get_ftr_old_name(feature)+' Feature Controller\n')
hfile.write (' * @warning This function allocate memory\n')
hfile.write (' * @post ' + ARFunctionName (MODULE_FEATURE, get_ftr_old_name(feature), 'Delete')+'() must be called to delete the Feature Controller and free the memory allocated.\n')
        hfile.write (' * @param[in] networkController The networkController used to send commands; can be NULL and defined later with '+ARFunctionName(MODULE_FEATURE, get_ftr_old_name(feature), 'SetNetworkController')+'().\n')
hfile.write (' * @param[out] error executing error.\n')
hfile.write (' * @return the new '+get_ftr_old_name(feature)+' Feature Controller\n')
hfile.write (' * @see ' + ARFunctionName (MODULE_FEATURE, get_ftr_old_name(feature), 'Delete')+'\n')
hfile.write (' */\n')
hfile.write (''+className+' *' + ARFunctionName (MODULE_FEATURE, get_ftr_old_name(feature), 'New')+' (ARCONTROLLER_Network_t *networkController, eARCONTROLLER_ERROR *error);\n')
hfile.write ('\n')
hfile.write ('/**\n')
hfile.write (' * @brief Delete the '+get_ftr_old_name(feature)+' Feature Controller\n')
hfile.write (' * @warning This function free memory\n')
hfile.write (' * @param feature The feature controller to delete\n')
hfile.write (' * @see ' + ARFunctionName (MODULE_FEATURE, get_ftr_old_name(feature), 'New')+'\n')
hfile.write (' */\n')
hfile.write ('void ' + ARFunctionName (MODULE_FEATURE, get_ftr_old_name(feature), 'Delete')+' ('+className+' **feature);\n')
hfile.write ('\n')
hfile.write ('/**\n')
hfile.write (' * @brief Register the feature controller to be called when the commands are decoded.\n')
hfile.write (' * @param feature The feature controller to register\n')
hfile.write (' * return executing error\n')
hfile.write (' */\n')
hfile.write ('eARCONTROLLER_ERROR '+ARFunctionName (MODULE_FEATURE, get_ftr_old_name(feature), 'RegisterARCommands')+' ('+className+' *feature);\n')
hfile.write ('\n')
hfile.write ('/**\n')
        hfile.write (' * @brief Unregister the feature controller so it is no longer called when the commands are decoded.\n')
hfile.write (' * @param feature The feature controller to unregister\n')
hfile.write (' * return executing error\n')
hfile.write (' */\n')
hfile.write ('eARCONTROLLER_ERROR '+ARFunctionName (MODULE_FEATURE, get_ftr_old_name(feature), 'UnregisterARCommands')+' ('+className+' *feature);\n')
hfile.write ('\n')
hfile.write ('/**\n')
        hfile.write (' * @brief Get the dictionary of the '+get_ftr_old_name(feature)+' Feature Controller\n')
hfile.write (' * @param feature The feature controller owning the dictionary to get\n')
hfile.write (' * @param[out] error executing error.\n')
hfile.write (' */\n')
hfile.write (''+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'COMMANDS')+' *' + ARFunctionName (MODULE_FEATURE, get_ftr_old_name(feature), 'GetDictionary')+' ('+className+' *feature, eARCONTROLLER_ERROR *error);\n')
hfile.write ('\n')
hfile.write ('/**\n')
hfile.write (' * @brief Add a callback to use when a command in project <code>' + ARCapitalize (get_ftr_old_name(feature)) + '</code> is received\n')
hfile.write (' * @param feature The feature controller receiving the command.\n')
hfile.write (' * @param[in] callback the callback to add.\n')
hfile.write (' * @param[in] commandKey Key of the command which the callback must be associated.\n')
hfile.write (' * @param[out] error executing error.\n')
        hfile.write (' * @param[in] customData custom data given as parameter to the callback.\n')
hfile.write (' * @see '+ARFunctionName(MODULE_FEATURE, get_ftr_old_name(feature), 'removeCallback')+'.\n')
hfile.write (' */\n')
hfile.write ('eARCONTROLLER_ERROR '+ARFunctionName(MODULE_FEATURE, get_ftr_old_name(feature), 'addCallback')+' ('+className+' *feature, '+defineNotificationDef()+' commandKey, '+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'CALLBACK')+' callback, void *customData);\n')
hfile.write ('\n')
hfile.write ('/**\n')
hfile.write (' * @brief Remove a callback used when a command in project <code>' + ARCapitalize (get_ftr_old_name(feature)) + '</code> is received\n')
hfile.write (' * @param feature The feature controller receiving the command.\n')
hfile.write (' * @param[in] commandKey Key of the command which the callback must be unassociated.\n')
hfile.write (' * @param[in] callback the callback to remove.\n')
        hfile.write (' * @param[in] customData The custom data given at registration.\n')
hfile.write (' * @param[out] error executing error.\n')
hfile.write (' */\n')
hfile.write ('eARCONTROLLER_ERROR '+ARFunctionName(MODULE_FEATURE, get_ftr_old_name(feature), 'removeCallback')+' ('+className+' *feature, '+defineNotificationDef()+' commandKey, '+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'CALLBACK')+' callback, void *customData);\n')
hfile.write ('\n')
for evt in feature.evts:
for arg in [arg1 for arg1 in evt.args if arg1.name != _LIST_FLAG]:
hfile.write ('extern const char *' + defineNotification(feature, evt, arg) + '; /**< Key of the argument </code>'+ arg.name+'</code> of event <code>' + ARCapitalize (format_cmd_name(evt)) + '</code> in feature <code>' + ARCapitalize (get_ftr_old_name(feature)) + '</code> */\n')
hfile.write('\n');
for cmd in feature.cmds:
hfile.write ('/**\n')
hfile.write (' * @brief Send a command <code>' + ARCapitalize (format_cmd_name(cmd)) + '</code> in feature <code>' + ARCapitalize (get_ftr_old_name(feature)) + '</code>\n')
if cmd.isDeprecated:
hfile.write (' * @deprecated\n')
hfile.write (' * ' + cmd.doc.desc.replace('\n', '\n * ')+'\n')
hfile.write (' * @param feature feature owning the commands\n')
for arg in cmd.args:
hfile.write (' * @param ' + arg.name + ' ' + get_arg_doc(arg).replace('\n', ' ') + '\n')
hfile.write (' * @return executing error\n')
hfile.write (' */\n')
hfile.write ('typedef eARCONTROLLER_ERROR (*'+ sendingFunctionType (MODULE_FEATURE, feature, cmd)+') ('+className+' *feature')
for arg in cmd.args:
hfile.write (', ' + xmlToC (MODULE_ARCOMMANDS, feature, cmd, arg, True) + ' ' + arg.name)
hfile.write (');\n')
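# The writes above produce, per command, a function-pointer typedef of the form
# (names illustrative; the real ones come from sendingFunctionType() and xmlToC(),
# with one parameter per command argument):
#   typedef eARCONTROLLER_ERROR (*<SendCmdFnType>) (<ClassName> *feature, <cType> <argName>);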
hfile.write ('\n')
if cmd.bufferType == ArCmdBufferType.NON_ACK:
hfile.write ('/**\n')
hfile.write (' * @brief Set the parameters to send through the command <code>' + ARCapitalize (format_cmd_name(cmd)) + '</code> in project <code>' + ARCapitalize (get_ftr_old_name(feature)) + '</code>\n')
if cmd.isDeprecated:
hfile.write (' * @deprecated\n')
hfile.write (' * ' + cmd.doc.desc.replace('\n', '\n * ')+'\n')
hfile.write (' * @param feature feature owning the commands\n')
for arg in cmd.args:
hfile.write (' * @param ' + arg.name + ' ' + get_arg_doc(arg).replace('\n', ' ') + '\n')
hfile.write (' * @return executing error\n')
hfile.write (' */\n')
hfile.write ('typedef eARCONTROLLER_ERROR (*' + setNAckFunctionType (feature, cmd)+') ('+className+' *feature')
for arg in cmd.args:
hfile.write (', ' + xmlToC (MODULE_ARCOMMANDS, feature, cmd, arg) + ' ' + arg.name)
hfile.write (');\n')
hfile.write ('\n')
hfile.write ('/**\n')
hfile.write (' * @brief Send the command <code>' + ARCapitalize (format_cmd_name(cmd)) + '</code> in project <code>' + ARCapitalize (get_ftr_old_name(feature)) + '</code> with the parameters set beforehand\n')
if cmd.isDeprecated:
hfile.write (' * @deprecated\n')
hfile.write (' * ' + cmd.doc.desc.replace('\n', '\n * ')+'\n')
hfile.write (' * @param feature feature owning the commands\n')
hfile.write (' * @param cmdBuffer buffer to store the command\n')
hfile.write (' * @param cmdBufferSize size of the buffer\n')
hfile.write (' * @return executing error\n')
hfile.write (' */\n')
hfile.write ('eARCONTROLLER_ERROR '+ sendNAckFunctionName (feature, cmd)+' ('+className+' *feature, u_int8_t *cmdBuffer, int32_t cmdBufferSize);\n')
hfile.write ('\n')
for arg in cmd.args:
hfile.write ('/**\n')
hfile.write (' * @brief Set '+arg.name+' sent through the command <code>' + ARCapitalize (format_cmd_name(cmd)) + '</code> in project <code>' + ARCapitalize (get_ftr_old_name(feature)) + '</code>\n')
if cmd.isDeprecated:
hfile.write (' * @deprecated\n')
hfile.write (' * ' + cmd.doc.desc.replace('\n', '\n * ')+'\n')
hfile.write (' * @param feature feature owning the commands\n')
hfile.write (' * @param ' + arg.name + ' ' + get_arg_doc(arg).replace('\n', ' ') + '\n')
hfile.write (' * @return executing error\n')
hfile.write (' */\n')
hfile.write ('typedef eARCONTROLLER_ERROR (*' + setNAckFunctionType (feature, cmd, arg)+') ('+className+' *feature, ' + xmlToC (MODULE_ARCOMMANDS, feature, cmd, arg) + ' ' + arg.name+');\n')
hfile.write ('\n')
hfile.write ('/**\n')
hfile.write (' * @brief Feature controller allowing to send commands related to the '+get_ftr_old_name(feature)+' Feature.\n')
hfile.write (' * ' + feature.doc.replace('\n', '\n * ')+'\n')
hfile.write (' */\n')
hfile.write ('struct '+className+'\n')
hfile.write ('{\n')
for cmd in feature.cmds:
hfile.write (' '+sendingFunctionType (MODULE_FEATURE, feature, cmd)+' '+sendingFunction(cmd)+';\n')
if cmd.bufferType == ArCmdBufferType.NON_ACK:
hfile.write (' '+setNAckFunctionType (feature, cmd)+' '+setNAckFunction(cmd)+';\n')
for arg in cmd.args:
hfile.write (' ' + setNAckFunctionType (feature, cmd, arg)+' '+setNAckFunction(cmd, arg)+'; /**< Set the parameter <code>'+arg.name+'</code> of the command <code>' + ARCapitalize (format_cmd_name(cmd)) + '</code> in feature <code>' + ARCapitalize (get_ftr_old_name(feature)) + '</code>. */\n')
hfile.write (' '+classPrivName+' *privatePart; /**< Private part of '+className+' */\n')
hfile.write ('};\n')
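# The generated struct therefore exposes one sending function pointer per command,
# plus the NON_ACK setter pointers (one per command and one per argument), and ends
# with the opaque privatePart pointer that holds the controller state.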
hfile.write ('\n')
hfile.write ('/**\n')
hfile.write (' * @brief Set a NetworkController to use to send commands.\n')
hfile.write (' * @param feature The feature controller.\n')
hfile.write (' * @param[in] networkController The networkController used to send commands; must not be NULL.\n')
hfile.write (' * @return executing error.\n')
hfile.write (' */\n')
hfile.write ('eARCONTROLLER_ERROR '+ARFunctionName(MODULE_FEATURE, get_ftr_old_name(feature), 'SetNetworkController')+' ('+className+' *feature, ARCONTROLLER_Network_t *networkController);\n')
hfile.write ('\n')
hfile.write ('/**\n')
hfile.write (' * @brief Get the elements of a command received.\n')
hfile.write (' * @param feature The feature controller receiving the command.\n')
hfile.write (' * @param[in] commandKey Key of the command.\n')
hfile.write (' * @param[out] error executing error.\n')
hfile.write (' * @return Element dictionary of the command; can be NULL if an error occurred.\n')
hfile.write (' */\n')
hfile.write ('ARCONTROLLER_DICTIONARY_ELEMENT_t *' + ARFunctionName (MODULE_ARCONTROLLER, get_ftr_old_name(feature), 'GetCommandElements')+' ('+className+' *feature, '+defineNotificationDef()+' commandKey, eARCONTROLLER_ERROR *error);\n')
hfile.write ('\n')
hfile.write ('#endif /* '+includeDefine+' */\n')
hfile.write ('\n')
hfile.write ('// END GENERATED CODE\n')
hfile.close () # a single public header is generated for all features (see the automake sources)
#################################################
# Write Feature controller private header file #
#################################################
#for feature in allFeatures: # previously one private header per feature; a single file is now generated for all features (see the automake sources)
#className = ARTypeName (MODULE_FEATURE, get_ftr_old_name(feature), '')
includeDefine = '_' + MODULE_FEATURE + '_PRIVATE_H_' #includeDefine = '_' + ARMacroName (MODULE_FEATURE, get_ftr_old_name(feature), 'PRIVATE_H') + '_'
headerPrivateFileName = CTRL_FTR_PRIV_H_NAME #headerPrivateFileName = className + '.h'
filepath = SRC_DIR + headerPrivateFileName
hPrivFile = open (filepath, 'w')
hPrivFile.write ('/**********************************************************\n')
hPrivFile.write (' * AUTOGENERATED FILE *\n')
hPrivFile.write (' * DO NOT MODIFY IT *\n')
hPrivFile.write (' * *\n')
hPrivFile.write (' * To add new commands : *\n')
hPrivFile.write (' * - Modify ../Xml/commands.xml file *\n')
hPrivFile.write (' * - Re-run generateFeatureControllers.py script *\n')
hPrivFile.write (' * *\n')
hPrivFile.write (' **********************************************************/\n')
hPrivFile.write ('\n')
hPrivFile.write ('/**\n')
hPrivFile.write ('* @file '+headerPrivateFileName+'\n')
hPrivFile.write ('* @brief Feature controller allowing to send commands related to a Feature.\n') # single file for all features, so no per-feature name here
hPrivFile.write ('*/\n')
hPrivFile.write ('\n')
hPrivFile.write ('#ifndef '+includeDefine+'\n')
hPrivFile.write ('#define '+includeDefine+'\n')
hPrivFile.write ('\n')
hPrivFile.write ('#include <libARSAL/ARSAL_Mutex.h>\n')
hPrivFile.write ('#include <libARCommands/ARCommands.h>\n')
hPrivFile.write ('#include <libARController/ARCONTROLLER_Feature.h>\n')
hPrivFile.write ('\n')
hPrivFile.write ('void ' + ARFunctionName (MODULE_ARCONTROLLER, 'feature', 'DeleteCommandsDictionary')+' ('+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'COMMANDS')+' **dictionary);\n')
hPrivFile.write ('\n')
hPrivFile.write (ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'COMMANDS')+' *' + ARFunctionName (MODULE_ARCONTROLLER, 'feature', 'NewCommandsElement')+' (int commandKey, eARCONTROLLER_ERROR *error);\n')
hPrivFile.write ('void ' + ARFunctionName (MODULE_ARCONTROLLER, 'feature', 'DeleteCommandsElement')+' ('+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'COMMANDS')+' **dictCmdElement);\n')
hPrivFile.write ('\n')
hPrivFile.write ('void ' + ARFunctionName (MODULE_ARCONTROLLER, 'feature', 'DeleteElement')+' ('+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'ELEMENT')+' **element);\n')
hPrivFile.write ('\n')
hPrivFile.write ('void ' + ARFunctionName (MODULE_ARCONTROLLER, 'feature', 'DeleteArgumentsDictionary')+' ('+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'ARG')+' **dictionary);\n')
hPrivFile.write ('\n')
#TODO: remove (superseded code kept below for reference)
'''
hPrivFile.write (''+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'COMMANDS')+' *' + ARFunctionName (MODULE_ARCONTROLLER, 'feature', 'FindCmdElements')+' ('+className+' *feature, int commandKey);\n')
hPrivFile.write ('\n')
'''
hPrivFile.write ('void ' + ARFunctionName (MODULE_ARCONTROLLER, 'feature', 'AddElement')+' ('+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'ELEMENT')+' **elementDict, '+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'ELEMENT')+' *newElement);\n')
hPrivFile.write ('\n')
for feature in ctx.features: # all features are emitted into this single private header (see the automake sources)
className = ARTypeName (MODULE_FEATURE, get_ftr_old_name(feature), '')
classPrivName = ARTypeName (MODULE_FEATURE, get_ftr_old_name(feature), 'Private')
hPrivFile.write ('/*******************************\n')
hPrivFile.write (' * --- FEATURE '+get_ftr_old_name(feature)+' --- \n')
hPrivFile.write (' ******************************/\n')
for cmd in feature.cmds:
if cmd.bufferType == ArCmdBufferType.NON_ACK:
hPrivFile.write ('/**\n')
hPrivFile.write (' * @brief Parameters to send through the command <code>' + ARCapitalize (format_cmd_name(cmd)) + '</code> in project <code>' + ARCapitalize (get_ftr_old_name(feature)) + '</code>\n')
hPrivFile.write (' */\n')
hPrivFile.write ('typedef struct\n')
hPrivFile.write ('{\n')
for arg in cmd.args:
hPrivFile.write (' ' + xmlToC (MODULE_ARCOMMANDS, feature, cmd, arg) + ' '+arg.name+'; /**< */\n')
hPrivFile.write (' void *data; /**< Custom data used to manage the sending of unacknowledged commands. */\n')
hPrivFile.write ('}'+structNAckType (feature, cmd)+';\n')
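# Sketch of intent (an assumption based on the fields above): this generated struct
# caches the latest argument values of a non-acknowledged command, plus a custom
# data slot, so the command can later be re-sent without passing the arguments again.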
hPrivFile.write ('\n')
hPrivFile.write ('/**\n')
hPrivFile.write (' * @brief Private part of '+className+'.\n')
hPrivFile.write (' */\n')
hPrivFile.write ('struct '+classPrivName+'\n')
hPrivFile.write ('{\n')
hPrivFile.write (' ARCONTROLLER_Network_t *networkController; /**< The network controller used to send commands */\n')
hPrivFile.write (' '+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'COMMANDS')+' *dictionary; /**< stores states and settings of the device */\n')
hPrivFile.write (' ARCONTROLLER_Dictionary_t *commandCallbacks; /**< dictionary storing callbacks to use when the command is received. */\n')
hPrivFile.write (' ARSAL_Mutex_t mutex; /**< Mutex for multithreading */\n')
for cmd in feature.cmds:
if cmd.bufferType == ArCmdBufferType.NON_ACK:
hPrivFile.write (' '+structNAckType (feature, cmd)+' *'+structNAckName (cmd)+'; /**< */\n')
hPrivFile.write ('};\n')
hPrivFile.write ('\n')
for cmd in feature.cmds:
hPrivFile.write ('/**\n')
hPrivFile.write (' * @brief Send a command <code>' + ARCapitalize (format_cmd_name(cmd)) + '</code> in project <code>' + ARCapitalize (get_ftr_old_name(feature)) + '</code>\n')
if cmd.isDeprecated:
hPrivFile.write (' * @deprecated\n')
hPrivFile.write (' * ' + cmd.doc.desc.replace('\n', '\n * ')+'\n')
hPrivFile.write (' * @param feature feature owning the commands\n')
for arg in cmd.args:
hPrivFile.write (' * @param ' + arg.name + ' ' + get_arg_doc(arg).replace('\n', ' ') + '\n')
hPrivFile.write (' * @return executing error\n')
hPrivFile.write (' */\n')
hPrivFile.write ('eARCONTROLLER_ERROR ' + sendingFunctionName (MODULE_FEATURE, feature, cmd)+' ('+className+' *feature')
for arg in cmd.args:
hPrivFile.write (', ' + xmlToC (MODULE_ARCOMMANDS, feature, cmd, arg, True) + ' ' + arg.name)
hPrivFile.write (');\n')
hPrivFile.write ('\n')
if cmd.bufferType == ArCmdBufferType.NON_ACK:
hPrivFile.write ('/**\n')
hPrivFile.write (' * @brief Set the parameters to send through the command <code>' + ARCapitalize (format_cmd_name(cmd)) + '</code> in project <code>' + ARCapitalize (get_ftr_old_name(feature)) + '</code>\n')
if cmd.isDeprecated:
hPrivFile.write (' * @deprecated\n')
hPrivFile.write (' * ' + cmd.doc.desc.replace('\n', '\n * ')+'\n')
hPrivFile.write (' * @param feature feature owning the commands\n')
for arg in cmd.args:
hPrivFile.write (' * @param ' + arg.name + ' ' + get_arg_doc(arg).replace('\n', ' ') + '\n')
hPrivFile.write (' * @return executing error\n')
hPrivFile.write (' */\n')
hPrivFile.write ('eARCONTROLLER_ERROR ' + setNAckFunctionName (feature, cmd)+' ('+className+' *feature')
for arg in cmd.args:
hPrivFile.write (', ' + xmlToC (MODULE_ARCOMMANDS, feature, cmd, arg) + ' _' + arg.name)
hPrivFile.write (');\n')
hPrivFile.write ('\n')
for arg in cmd.args:
hPrivFile.write ('/**\n')
hPrivFile.write (' * @brief Set '+arg.name+' sent through the command <code>' + ARCapitalize (format_cmd_name(cmd)) + '</code> in project <code>' + ARCapitalize (get_ftr_old_name(feature)) + '</code>\n')
if cmd.isDeprecated:
hPrivFile.write (' * @deprecated\n')
hPrivFile.write (' * ' + cmd.doc.desc.replace('\n', '\n * ')+'\n')
hPrivFile.write (' * @param feature feature owning the commands\n')
hPrivFile.write (' * @param ' + arg.name + ' ' + get_arg_doc(arg).replace('\n', ' ') + '\n')
hPrivFile.write (' * @return executing error\n')
hPrivFile.write (' */\n')
hPrivFile.write ('eARCONTROLLER_ERROR ' + setNAckFunctionName (feature, cmd, arg)+' ('+className+' *feature, ' + xmlToC (MODULE_ARCOMMANDS, feature, cmd, arg) + ' ' + arg.name +');\n')
hPrivFile.write ('\n')
for evt in feature.evts:
hPrivFile.write ('/**\n')
hPrivFile.write (' * @brief callback used when the command <code>' + ARCapitalize (format_cmd_name(evt)) + '</code> is decoded\n')
hPrivFile.write (' * @param feature The feature controller registered\n')
for arg in evt.args:
hPrivFile.write (' * @param ' + arg.name + ' ' + get_arg_doc(arg).replace('\n', ' ') + '\n')
hPrivFile.write (' * @param customData customData set at registration\n')
hPrivFile.write (' */\n')
hPrivFile.write ('void '+decodeCallback (feature, evt)+' (')
for arg in evt.args:
hPrivFile.write (xmlToC (MODULE_ARCOMMANDS, feature, evt, arg, True) + ' _' + arg.name + ', ')
hPrivFile.write ('void *customData);\n')
hPrivFile.write ('\n')
for evt in msgs_without_multiset(feature.evts):
hPrivFile.write (''+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'ELEMENT')+' *'+ ARFunctionName (MODULE_ARCONTROLLER, get_ftr_old_name(feature), 'newCmdElement'+ARCapitalize(format_cmd_name(evt)))+' ('+className+' *feature, ')
for arg in evt.args:
hPrivFile.write (xmlToC (MODULE_ARCOMMANDS, feature, evt, arg, True) + ' _' + arg.name + ', ')
if evt.listType == ArCmdListType.LIST:
hPrivFile.write ('int listIndex, ')
hPrivFile.write ('eARCONTROLLER_ERROR *error);\n')
hPrivFile.write ('\n')
hPrivFile.write ('\n')
hPrivFile.write ('#endif /* '+includeDefine+' */\n')
hPrivFile.write ('\n')
hPrivFile.write ('// END GENERATED CODE\n')
hPrivFile.close () # a single private header is generated for all features (see the automake sources)
#################################################
# Write Feature controller c file #
#################################################
#for feature in allFeatures: # previously one .c file per feature; a single file is now generated for all features (see the automake sources)
#className = ARTypeName (MODULE_FEATURE, get_ftr_old_name(feature), '')
classTag = 'ARCONTROLLER_Feature' #classTag = ARMacroName (MODULE_FEATURE, get_ftr_old_name(feature), '')
cFileName = CTRL_FTR_C_NAME #cFileName = className + '.c'
filepath = SRC_DIR + CTRL_FTR_C_NAME
cFile = open (filepath, 'w')
cFile.write ('/**********************************************************\n')
cFile.write (' * AUTOGENERATED FILE *\n')
cFile.write (' * DO NOT MODIFY IT *\n')
cFile.write (' * *\n')
cFile.write (' * To add new commands : *\n')
cFile.write (' * - Modify ../Xml/commands.xml file *\n')
cFile.write (' * - Re-run generateFeatureControllers.py script *\n')
cFile.write (' * *\n')
cFile.write (' **********************************************************/\n')
cFile.write ('\n')
cFile.write ('/**\n')
cFile.write ('* @file '+cFileName+'\n')
cFile.write ('* @brief Feature controller allowing to send commands related to a Feature.\n') # single file for all features, so no per-feature name here
cFile.write ('*/\n')
cFile.write ('\n')
cFile.write ('#include <stdio.h>\n')
cFile.write ('#include <libARSAL/ARSAL_Mutex.h>\n')
cFile.write ('#include <libARCommands/ARCommands.h>\n')
cFile.write ('#include <libARController/ARCONTROLLER_Network.h>\n')
cFile.write ('#include <libARController/ARCONTROLLER_Feature.h>\n')
cFile.write ('#include <libARController/ARCONTROLLER_Stream.h>\n')
cFile.write ('\n')
cFile.write ('#include <ARCONTROLLER_Stream.h>\n')
cFile.write ('#include <ARCONTROLLER_StreamSender.h>\n')
cFile.write ('#include <ARCONTROLLER_Network.h>\n')
cFile.write ('#include <ARCONTROLLER_NAckCbs.h>\n')
cFile.write ('#include "ARCONTROLLER_Feature.h"\n')
cFile.write ('\n')
cFile.write ('#define '+MODULE_FEATURE+'_TAG "'+classTag+'"\n')
cFile.write ('\n')
cFile.write ('void ' + ARFunctionName (MODULE_ARCONTROLLER, 'feature', 'DeleteCommandsDictionary')+' ('+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'COMMANDS')+' **dictionary)\n')
cFile.write ('{\n')
cFile.write (' // -- Delete a commands dictionary --\n')
cFile.write (' \n')
cFile.write (' '+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'COMMANDS')+' *dictCmdElement = NULL;\n')
cFile.write (' '+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'COMMANDS')+' *dictCmdTmp = NULL;\n')
cFile.write (' '+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'ELEMENT')+' *dictElement = NULL;\n')
cFile.write (' '+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'ELEMENT')+' *dictTmp = NULL;\n')
cFile.write (' \n')
cFile.write (' if (dictionary != NULL)\n')
cFile.write (' {\n')
cFile.write (' if ((*dictionary) != NULL)\n')
cFile.write (' {\n')
cFile.write (' // Free the hash table contents\n')
cFile.write (' HASH_ITER(hh, (*dictionary), dictCmdElement, dictCmdTmp)\n')
cFile.write (' {\n')
cFile.write (' // Free the hash table contents\n')
cFile.write (' HASH_ITER(hh, dictCmdElement->elements, dictElement, dictTmp)\n')
cFile.write (' {\n')
cFile.write (' // for each element\n')
cFile.write (' \n')
cFile.write (' if (dictElement->arguments != NULL)\n')
cFile.write (' {\n')
cFile.write (' // delete all arguments\n')
cFile.write (' ' + ARFunctionName (MODULE_ARCONTROLLER, 'feature', 'DeleteArgumentsDictionary')+' (&(dictElement->arguments));\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' if (dictElement->key != NULL)\n')
cFile.write (' {\n')
cFile.write (' // free the key of the element\n')
cFile.write (' free (dictElement->key);\n')
cFile.write (' dictElement->key = NULL;\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' HASH_DEL (dictCmdElement->elements, dictElement);\n')
cFile.write (' free (dictElement);\n')
cFile.write (' dictElement = NULL;\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' HASH_DEL ((*dictionary), dictCmdElement);\n')
cFile.write (' free (dictCmdElement);\n')
cFile.write (' dictCmdElement = NULL;\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' free (*dictionary);\n')
cFile.write (' (*dictionary) = NULL;\n')
cFile.write (' }\n')
cFile.write (' }\n')
cFile.write ('}\n')
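# Note on the generated C above: the commands dictionary is a two-level uthash table
# (command -> elements -> arguments). Deletion walks both levels with HASH_ITER,
# unlinking each entry with HASH_DEL before freeing it, which is the safe way to
# delete while iterating in uthash.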
cFile.write ('\n')
cFile.write (ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'COMMANDS')+' *' + ARFunctionName (MODULE_ARCONTROLLER, 'feature', 'NewCommandsElement')+' (int commandKey, eARCONTROLLER_ERROR *error)\n')
cFile.write ('{\n')
cFile.write (' // -- New Commands Element --\n')
cFile.write (' \n')
cFile.write (' //local declarations\n')
cFile.write (' eARCONTROLLER_ERROR localError = ARCONTROLLER_OK;\n')
cFile.write (' '+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'COMMANDS')+' *dictCmdElement = malloc (sizeof('+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'COMMANDS')+'));\n')
cFile.write (' \n')
cFile.write (' if (dictCmdElement != NULL)\n')
cFile.write (' {\n')
cFile.write (' dictCmdElement->command = commandKey;\n')
cFile.write (' dictCmdElement->elements = NULL;\n')
cFile.write (' }\n')
cFile.write (' else\n')
cFile.write (' {\n')
cFile.write (' localError = ARCONTROLLER_ERROR_ALLOC;\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' // Return the error\n')
cFile.write (' if (error != NULL)\n')
cFile.write (' {\n')
cFile.write (' *error = localError;\n')
cFile.write (' }\n')
cFile.write (' // No else: error is not returned \n')
cFile.write (' \n')
cFile.write (' return dictCmdElement;\n')
cFile.write ('}\n')
cFile.write ('\n')
cFile.write ('void ' + ARFunctionName (MODULE_ARCONTROLLER, 'feature', 'DeleteCommandsElement')+' ('+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'COMMANDS')+' **dictCmdElement)\n')
cFile.write ('{\n')
cFile.write (' // -- Delete a commands Element --\n')
cFile.write (' \n')
cFile.write (' '+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'ELEMENT')+' *dictElement = NULL;\n')
cFile.write (' '+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'ELEMENT')+' *dictTmp = NULL;\n')
cFile.write (' \n')
cFile.write (' if (dictCmdElement != NULL)\n')
cFile.write (' {\n')
cFile.write (' if ((*dictCmdElement) != NULL)\n')
cFile.write (' {\n')
cFile.write (' // Free the hash table contents\n')
cFile.write (' HASH_ITER(hh, (*dictCmdElement)->elements, dictElement, dictTmp)\n')
cFile.write (' {\n')
cFile.write (' // for each element\n')
cFile.write (' \n')
cFile.write (' HASH_DEL ((*dictCmdElement)->elements, dictElement);\n')
cFile.write (' ' + ARFunctionName (MODULE_ARCONTROLLER, 'feature', 'DeleteElement')+' (&dictElement);\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' free (*dictCmdElement);\n')
cFile.write (' (*dictCmdElement) = NULL;\n')
cFile.write (' }\n')
cFile.write (' }\n')
cFile.write ('}\n')
cFile.write ('\n')
cFile.write ('void ' + ARFunctionName (MODULE_ARCONTROLLER, 'feature', 'DeleteElement')+' ('+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'ELEMENT')+' **element)\n')
cFile.write ('{\n')
cFile.write (' // -- Delete an element --\n')
cFile.write (' \n')
cFile.write (' if (element != NULL)\n')
cFile.write (' {\n')
cFile.write (' if ((*element) != NULL)\n')
cFile.write (' {\n')
cFile.write (' if ((*element)->arguments != NULL)\n')
cFile.write (' {\n')
cFile.write (' // delete all arguments\n')
cFile.write (' ' + ARFunctionName (MODULE_ARCONTROLLER, 'feature', 'DeleteArgumentsDictionary')+' (&((*element)->arguments));\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' if ((*element)->key != NULL)\n')
cFile.write (' {\n')
cFile.write (' // free the key of the element\n')
cFile.write (' free ((*element)->key);\n')
cFile.write (' (*element)->key = NULL;\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' free (*element);\n')
cFile.write (' (*element) = NULL;\n')
cFile.write (' }\n')
cFile.write (' }\n')
cFile.write ('}\n')
cFile.write ('\n')
cFile.write ('void ' + ARFunctionName (MODULE_ARCONTROLLER, 'feature', 'DeleteArgumentsDictionary')+' ('+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'ARG')+' **dictionary)\n')
cFile.write ('{\n')
cFile.write (' // -- Delete arguments dictionary --\n')
cFile.write (' \n')
cFile.write (' '+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'ARG')+' *dictElement = NULL;\n')
cFile.write (' '+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'ARG')+' *dictTmp = NULL;\n')
cFile.write (' \n')
cFile.write (' if (dictionary != NULL)\n')
cFile.write (' {\n')
cFile.write (' if ((*dictionary) != NULL)\n')
cFile.write (' {\n')
cFile.write (' // Free the hash table contents\n')
cFile.write (' HASH_ITER(hh, (*dictionary), dictElement, dictTmp)\n')
cFile.write (' {\n')
cFile.write (' /* for each element of the arguments dictionary */\n')
cFile.write (' if ((dictElement->valueType == '+AREnumValue(MODULE_ARCONTROLLER, 'DICTIONARY', 'VALUE_TYPE', 'string')+') && (dictElement->value.'+ARCapitalize('string')+' != NULL))\n')
cFile.write (' {\n')
cFile.write (' free (dictElement->value.'+ARCapitalize('string')+');\n')
cFile.write (' dictElement->value.'+ARCapitalize('string')+' = NULL;\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' HASH_DEL((*dictionary), dictElement);\n')
cFile.write (' free(dictElement);\n')
cFile.write (' dictElement = NULL;\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' free (*dictionary);\n')
cFile.write (' (*dictionary) = NULL;\n')
cFile.write (' }\n')
cFile.write (' }\n')
cFile.write ('}\n')
cFile.write ('\n')
#TODO: remove (superseded code kept below for reference)
'''
cFile.write (''+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'COMMANDS')+' *' + ARFunctionName (MODULE_ARCONTROLLER, 'feature', 'FindCmdElements')+' ('+className+' *feature, int commandKey)\n')
cFile.write ('{\n')
cFile.write (' // -- Find command elements --\n')
cFile.write (' \n')
cFile.write (' '+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'COMMANDS')+' *dictCmdElement = NULL;\n')
cFile.write (' ARSAL_Mutex_Lock (&(feature->privatePart->mutex));\n')
cFile.write (' \n')
cFile.write (' // Find command elements\n')
cFile.write (' HASH_FIND_INT (feature->privatePart->dictionary, &commandKey, dictCmdElement);\n')
cFile.write (' \n')
cFile.write (' ARSAL_Mutex_Unlock (&(feature->privatePart->mutex));\n')
cFile.write (' \n')
cFile.write (' return dictCmdElement;\n')
cFile.write ('}\n')
cFile.write ('\n')
'''
cFile.write ('void ' + ARFunctionName (MODULE_ARCONTROLLER, 'feature', 'AddElement')+' ('+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'ELEMENT')+' **elementDict, '+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'ELEMENT')+' *newElement)\n')
cFile.write ('{\n')
cFile.write (' // -- Set new element in CommandElements --\n')
cFile.write (' '+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'ELEMENT')+' *oldElement = NULL;\n')
cFile.write (' \n')
cFile.write (' // Check whether the element already exists\n')
cFile.write (' HASH_FIND_STR ((*elementDict), newElement->key, oldElement);\n')
cFile.write (' if (oldElement != NULL)\n')
cFile.write (' {\n')
cFile.write (' HASH_REPLACE_STR ((*elementDict), key, newElement, oldElement);\n')
cFile.write (' \n')
cFile.write (' ' + ARFunctionName (MODULE_ARCONTROLLER, 'feature', 'DeleteArgumentsDictionary')+' (&(oldElement->arguments));\n')
cFile.write (' free (oldElement);\n')
cFile.write (' oldElement = NULL;\n')
cFile.write (' }\n')
cFile.write (' else\n')
cFile.write (' {\n')
cFile.write (' HASH_ADD_KEYPTR (hh, (*elementDict), newElement->key, strlen(newElement->key), newElement);\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write ('}\n')
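# Note: HASH_REPLACE_STR swaps newElement in under the same key and returns the
# displaced entry through oldElement, so the generated code only has to free the
# old element's argument dictionary and the element itself.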
cFile.write ('\n')
for feature in ctx.features: # all features are emitted into this single .c file (see the automake sources)
className = ARTypeName (MODULE_FEATURE, get_ftr_old_name(feature), '')
classPrivName = ARTypeName (MODULE_FEATURE, get_ftr_old_name(feature), 'Private')
cFile.write ('/*******************************\n')
cFile.write (' * --- FEATURE '+get_ftr_old_name(feature)+' --- \n')
cFile.write (' ******************************/\n')
cFile.write ('\n')
cFile.write ('/*************************\n')
cFile.write (' * Private header\n')
cFile.write (' *************************/\n')
cFile.write ('\n')
cFile.write ('/*************************\n')
cFile.write (' * Implementation\n')
cFile.write (' *************************/\n')
cFile.write ('\n')
for evt in feature.evts:
for arg in [arg1 for arg1 in evt.args if arg1.name != _LIST_FLAG]:
cFile.write ('const char *' + defineNotification(feature, evt, arg) + ' = "' + defineNotification(feature, evt, arg).lower() + '";\n')
cFile.write ('\n')
cFile.write (''+className+' *' + ARFunctionName (MODULE_FEATURE, get_ftr_old_name(feature), 'New')+' (ARCONTROLLER_Network_t *networkController, eARCONTROLLER_ERROR *error)\n')
cFile.write ('{\n')
cFile.write (' // -- Create a new Feature Controller --\n')
cFile.write (' \n')
cFile.write (' //local declarations\n')
cFile.write (' eARCONTROLLER_ERROR localError = ARCONTROLLER_OK;\n')
cFile.write (' '+className+' *featureController = NULL;\n')
cFile.write (' \n')
cFile.write (' if (localError == ARCONTROLLER_OK)\n')
cFile.write (' {\n')
cFile.write (' // Create the Feature Controller\n')
cFile.write (' featureController = malloc (sizeof ('+className+'));\n')
cFile.write (' if (featureController != NULL)\n')
cFile.write (' {\n')
for cmd in feature.cmds:
cFile.write (' featureController->'+sendingFunction(cmd)+' = '+sendingFunctionName (MODULE_FEATURE, feature, cmd)+';\n')
if cmd.bufferType == ArCmdBufferType.NON_ACK:
cFile.write (' featureController->'+setNAckFunction(cmd)+' = '+setNAckFunctionName (feature, cmd)+';\n')
for arg in cmd.args:
cFile.write (' featureController->'+setNAckFunction (cmd, arg)+' = '+setNAckFunctionName(feature, cmd, arg)+';\n')
cFile.write (' \n')
cFile.write (' featureController->privatePart = NULL;\n')
cFile.write (' }\n')
cFile.write (' else\n')
cFile.write (' {\n')
cFile.write (' localError = ARCONTROLLER_ERROR_ALLOC;\n')
cFile.write (' }\n')
cFile.write (' }\n')
cFile.write (' // No else: skipped due to an error \n')
cFile.write (' \n')
cFile.write (' if (localError == ARCONTROLLER_OK)\n')
cFile.write (' {\n')
cFile.write (' // Create the Feature Controller private part\n')
cFile.write (' featureController->privatePart = malloc (sizeof ('+classPrivName+'));\n')
cFile.write (' if (featureController->privatePart != NULL)\n')
cFile.write (' {\n')
cFile.write (' featureController->privatePart->networkController = networkController;\n')
cFile.write (' featureController->privatePart->dictionary = NULL;\n')
cFile.write (' featureController->privatePart->commandCallbacks = NULL;\n')
for cmd in feature.cmds:
if cmd.bufferType == ArCmdBufferType.NON_ACK:
cFile.write (' featureController->privatePart->'+structNAckName (cmd)+' = NULL;\n')
cFile.write (' // Create the mutex \n')
cFile.write (' if (ARSAL_Mutex_Init (&(featureController->privatePart->mutex)) != 0)\n')
cFile.write (' {\n')
cFile.write (' localError = ARCONTROLLER_ERROR_INIT_MUTEX;\n')
cFile.write (' }\n')
cFile.write (' }\n')
cFile.write (' else\n')
cFile.write (' {\n')
cFile.write (' localError = ARCONTROLLER_ERROR_ALLOC;\n')
cFile.write (' }\n')
cFile.write (' }\n')
cFile.write (' // No else: skipped due to an error \n')
cFile.write (' \n')
for cmd in feature.cmds:
if cmd.bufferType == ArCmdBufferType.NON_ACK:
cFile.write (' if (localError == ARCONTROLLER_OK)\n')
cFile.write (' {\n')
cFile.write (' featureController->privatePart->'+structNAckName (cmd)+' = calloc (1, sizeof ('+structNAckType (feature, cmd)+'));\n')
cFile.write (' if (featureController->privatePart->'+structNAckName (cmd)+' == NULL)\n')
cFile.write (' {\n')
cFile.write (' localError = ARCONTROLLER_ERROR_ALLOC;\n')
cFile.write (' }\n')
cFile.write (' }\n')
cFile.write ('        // No else: skipped due to an error \n')
cFile.write ('\n')
cFile.write (' if (localError == ARCONTROLLER_OK)\n')
cFile.write (' {\n')
cFile.write (' localError = '+nAckCbInit(feature, cmd)+' (featureController);\n')
cFile.write (' }\n')
cFile.write ('\n')
cFile.write (' // delete the feature Controller if an error occurred\n')
cFile.write (' if (localError != ARCONTROLLER_OK)\n')
cFile.write (' {\n')
cFile.write (' ' + ARFunctionName (MODULE_FEATURE, get_ftr_old_name(feature), 'Delete')+' (&featureController);\n')
cFile.write (' }\n')
cFile.write (' // No else: nothing to delete when no error occurred \n')
cFile.write (' \n')
cFile.write (' // Return the error\n')
cFile.write (' if (error != NULL)\n')
cFile.write (' {\n')
cFile.write (' *error = localError;\n')
cFile.write (' }\n')
cFile.write (' // No else: error is not returned \n')
cFile.write (' \n')
cFile.write (' return featureController;\n')
cFile.write ('}\n')
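# Construction flow of the generated New() above: allocate the public struct and
# wire the per-command function pointers, allocate the private part (network
# controller, dictionaries, mutex), then the NON_ACK parameter caches; any failure
# funnels into Delete() before the error is reported back to the caller.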
cFile.write ('\n')
cFile.write ('void ' + ARFunctionName (MODULE_FEATURE, get_ftr_old_name(feature), 'Delete')+' ('+className+' **feature)\n')
cFile.write ('{\n')
cFile.write (' // -- Delete the '+get_ftr_old_name(feature)+' feature Controller --\n')
cFile.write (' \n')
cFile.write (' if (feature != NULL)\n')
cFile.write (' {\n')
cFile.write (' if ((*feature) != NULL)\n')
cFile.write (' {\n')
cFile.write (' if ((*feature)->privatePart != NULL)\n')
cFile.write (' {\n')
cFile.write (' ARSAL_Mutex_Destroy (&((*feature)->privatePart->mutex));\n')
cFile.write (' \n')
cFile.write (' if ((*feature)->privatePart->dictionary != NULL)\n')
cFile.write (' {\n')
cFile.write (' ' + ARFunctionName (MODULE_ARCONTROLLER, 'feature', 'DeleteCommandsDictionary')+' (&((*feature)->privatePart->dictionary));\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' if ((*feature)->privatePart->commandCallbacks != NULL)\n')
cFile.write (' {\n')
cFile.write (' // Free the hash table contents the command callback\n')
cFile.write (' ARCONTROLLER_Dictionary_DeleteDictionary (&((*feature)->privatePart->commandCallbacks));\n')
cFile.write (' }\n')
cFile.write (' \n')
for cmd in feature.cmds:
if cmd.bufferType == ArCmdBufferType.NON_ACK:
cFile.write (' if ((*feature)->privatePart->'+structNAckName (cmd)+' != NULL)\n')
cFile.write (' {\n')
cFile.write (' '+nAckCbDeInit(feature, cmd)+' (*feature);\n')
cFile.write ('\n')
cFile.write (' free ((*feature)->privatePart->'+structNAckName (cmd)+');\n')
cFile.write (' (*feature)->privatePart->'+structNAckName (cmd)+' = NULL;\n')
cFile.write (' }\n')
cFile.write (' free ((*feature)->privatePart);\n')
cFile.write (' (*feature)->privatePart = NULL;\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' free (*feature);\n')
cFile.write (' (*feature) = NULL;\n')
cFile.write (' }\n')
cFile.write (' }\n')
cFile.write ('}\n')
cFile.write ('\n')
cFile.write (''+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'COMMANDS')+' *' + ARFunctionName (MODULE_FEATURE, get_ftr_old_name(feature), 'GetDictionary')+' ('+className+' *feature, eARCONTROLLER_ERROR *error)\n')
cFile.write ('{\n')
cFile.write (' // -- Get the dictionary of the '+get_ftr_old_name(feature)+' Feature Controller --\n')
cFile.write (' \n')
cFile.write (' eARCONTROLLER_ERROR localError = ARCONTROLLER_OK;\n')
cFile.write (' '+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'COMMANDS')+' *dictionary = NULL;\n')
cFile.write (' \n')
cFile.write (' // Check parameters\n')
cFile.write (' if ((feature == NULL) || (feature->privatePart == NULL))\n')
cFile.write (' {\n')
cFile.write (' localError = ARCONTROLLER_ERROR_BAD_PARAMETER;\n')
cFile.write (' }\n')
cFile.write (' // No else: the parameter check sets error to ARCONTROLLER_ERROR_BAD_PARAMETER and stops the processing\n')
cFile.write (' \n')
cFile.write (' if (localError == ARCONTROLLER_OK)\n')
cFile.write (' {\n')
cFile.write (' dictionary = feature->privatePart->dictionary;\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' // Return the error\n')
cFile.write (' if (error != NULL)\n')
cFile.write (' {\n')
cFile.write (' *error = localError;\n')
cFile.write (' }\n')
cFile.write (' // No else: error is not returned \n')
cFile.write (' \n')
cFile.write (' return dictionary;\n')
cFile.write ('}\n')
cFile.write ('\n')
cFile.write ('eARCONTROLLER_ERROR '+ARFunctionName(MODULE_FEATURE, get_ftr_old_name(feature), 'AddCallback')+' ('+className+' *feature, '+defineNotificationDef()+' commandKey, '+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'CALLBACK')+' callback, void *customData)\n')
cFile.write ('{\n')
cFile.write (' // -- Add a callback to use when a command in project <code>' + ARCapitalize (get_ftr_old_name(feature)) + '</code> is received --\n')
cFile.write (' \n')
cFile.write (' eARCONTROLLER_ERROR error = ARCONTROLLER_OK;\n')
cFile.write (' \n')
cFile.write (' // Check parameters\n')
cFile.write (' if ((feature == NULL) || (feature->privatePart == NULL))\n')
cFile.write (' {\n')
cFile.write (' error = ARCONTROLLER_ERROR_BAD_PARAMETER;\n')
cFile.write (' }\n')
cFile.write (' // No else: the parameter check sets error to ARCONTROLLER_ERROR_BAD_PARAMETER and stops the processing\n')
cFile.write (' \n')
cFile.write (' if (error == ARCONTROLLER_OK)\n')
cFile.write (' {\n')
cFile.write (' error = ARCONTROLLER_Dictionary_AddDictionaryElement (&(feature->privatePart->commandCallbacks), commandKey, callback, customData);\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' return error;\n')
cFile.write ('}\n')
cFile.write ('\n')
cFile.write ('eARCONTROLLER_ERROR '+ARFunctionName(MODULE_FEATURE, get_ftr_old_name(feature), 'RemoveCallback')+' ('+className+' *feature, '+defineNotificationDef()+' commandKey, '+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'CALLBACK')+' callback, void *customData)\n')
cFile.write ('{\n')
cFile.write (' // -- Remove a callback used when a command in project <code>' + ARCapitalize (get_ftr_old_name(feature)) + '</code> is received --\n')
cFile.write (' \n')
cFile.write (' eARCONTROLLER_ERROR error = ARCONTROLLER_OK;\n')
cFile.write (' \n')
cFile.write (' // Check parameters\n')
cFile.write (' if ((feature == NULL) || (feature->privatePart == NULL))\n')
cFile.write (' {\n')
cFile.write (' error = ARCONTROLLER_ERROR_BAD_PARAMETER;\n')
cFile.write (' }\n')
cFile.write (' // No else: the parameter check sets error to ARCONTROLLER_ERROR_BAD_PARAMETER and stops the processing\n')
cFile.write (' \n')
cFile.write (' if (error == ARCONTROLLER_OK)\n')
cFile.write (' {\n')
cFile.write (' error = ARCONTROLLER_Dictionary_RemoveDictionaryElement (feature->privatePart->commandCallbacks, commandKey, callback, customData);\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' return error;\n')
cFile.write ('}\n')
cFile.write ('\n')
cFile.write ('eARCONTROLLER_ERROR '+ARFunctionName (MODULE_FEATURE, get_ftr_old_name(feature), 'RegisterARCommands')+' ('+className+' *feature)\n')
cFile.write ('{\n')
cFile.write (' // -- Register the feature controller to be called when the commands are decoded. -- \n')
cFile.write (' \n')
cFile.write (' eARCONTROLLER_ERROR error = ARCONTROLLER_OK;\n')
cFile.write (' \n')
cFile.write (' // Check parameters\n')
cFile.write (' if ((feature == NULL) || (feature->privatePart == NULL) || (feature->privatePart->networkController == NULL) || (feature->privatePart->networkController->decoder == NULL))\n')
cFile.write (' {\n')
cFile.write (' error = ARCONTROLLER_ERROR_BAD_PARAMETER;\n')
cFile.write (' }\n')
cFile.write (' // No else: the parameter check sets error to ARCONTROLLER_ERROR_BAD_PARAMETER and stops the processing\n')
cFile.write (' \n')
cFile.write (' if (error == ARCONTROLLER_OK)\n')
cFile.write (' {\n')
for evt in feature.evts:
cFile.write (' '+arcommandsSetDecode(feature, evt)+' (feature->privatePart->networkController->decoder, &'+decodeCallback(feature, evt)+', feature);\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' return error;\n')
cFile.write ('}\n')
cFile.write ('\n')
cFile.write ('eARCONTROLLER_ERROR '+ARFunctionName (MODULE_FEATURE, get_ftr_old_name(feature), 'UnregisterARCommands')+' ('+className+' *feature)\n')
cFile.write ('{\n')
cFile.write (' // -- Unregister the feature controller to be called when the commands are decoded. -- \n')
cFile.write (' \n')
cFile.write (' eARCONTROLLER_ERROR error = ARCONTROLLER_OK;\n')
cFile.write (' \n')
cFile.write (' // Check parameters\n')
cFile.write (' if ((feature == NULL) || (feature->privatePart == NULL) || (feature->privatePart->networkController == NULL) || (feature->privatePart->networkController->decoder == NULL))\n')
cFile.write (' {\n')
cFile.write (' error = ARCONTROLLER_ERROR_BAD_PARAMETER;\n')
cFile.write (' }\n')
cFile.write (' // No else: the parameter check sets error to ARCONTROLLER_ERROR_BAD_PARAMETER and stops the processing\n')
cFile.write (' \n')
cFile.write (' if (error == ARCONTROLLER_OK)\n')
cFile.write (' {\n')
for evt in feature.evts:
cFile.write (' '+arcommandsSetDecode(feature, evt)+' (feature->privatePart->networkController->decoder, NULL, NULL);\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' return error;\n')
cFile.write ('}\n')
cFile.write ('\n')
for cmd in feature.cmds:
cFile.write ('eARCONTROLLER_ERROR ' + sendingFunctionName (MODULE_FEATURE, feature, cmd)+' ('+className+' *feature')
for arg in cmd.args:
cFile.write (', ' + xmlToC (MODULE_ARCOMMANDS, feature, cmd, arg, True) + ' ' + arg.name)
cFile.write (')\n')
cFile.write ('{\n')
cFile.write (' // -- Send a command <code>' + ARCapitalize (format_cmd_name(cmd)) + '</code> in project <code>' + ARCapitalize (get_ftr_old_name(feature)) + '</code> --\n')
cFile.write (' \n')
cFile.write (' eARCONTROLLER_ERROR error = ARCONTROLLER_OK;\n')
if any(isinstance(arg.argType, ArMultiSetting) for arg in cmd.args):
cFile.write (' u_int8_t cmdBuffer[4096];\n')
else:
cFile.write (' u_int8_t cmdBuffer[512];\n')
cFile.write (' int32_t cmdSize = 0;\n')
cFile.write (' eARCOMMANDS_GENERATOR_ERROR cmdError = ARCOMMANDS_GENERATOR_OK;\n')
cFile.write (' eARNETWORK_ERROR netError = ARNETWORK_OK;\n')
cFile.write (' \n')
cFile.write (' // Check parameters\n')
cFile.write (' if (feature == NULL)\n')
cFile.write (' {\n')
cFile.write (' error = ARCONTROLLER_ERROR_BAD_PARAMETER;\n')
cFile.write (' }\n')
cFile.write (' // No else: the parameter check sets error to ARCONTROLLER_ERROR_BAD_PARAMETER and stops the processing\n')
cFile.write (' \n')
cFile.write (' if (error == ARCONTROLLER_OK)\n')
cFile.write (' {\n')
cFile.write (' // Send ' + ARCapitalize(cmd.name) + ' command\n')
cFile.write (' cmdError = ARCOMMANDS_Generator_Generate' + ARCapitalize(get_ftr_old_name(feature)) + ARCapitalize(format_cmd_name(cmd)) + '(cmdBuffer, sizeof(cmdBuffer), &cmdSize')
for arg in cmd.args:
cFile.write (', ' + arg.name)
cFile.write(');\n')
cFile.write (' if (cmdError != ARCOMMANDS_GENERATOR_OK)\n')
cFile.write (' {\n')
cFile.write (' error = ARCONTROLLER_ERROR_COMMAND_GENERATING;\n')
cFile.write (' }\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' if (error == ARCONTROLLER_OK)\n')
cFile.write (' {\n')
bufferType = 'ARCONTROLLER_NETWORK_SENDING_DATA_TYPE_ACK'
if cmd.bufferType == ArCmdBufferType.NON_ACK:
bufferType = 'ARCONTROLLER_NETWORK_SENDING_DATA_TYPE_NOT_ACK'
elif cmd.bufferType == ArCmdBufferType.ACK:
bufferType = 'ARCONTROLLER_NETWORK_SENDING_DATA_TYPE_ACK'
elif cmd.bufferType == ArCmdBufferType.HIGH_PRIO:
bufferType = 'ARCONTROLLER_NETWORK_SENDING_DATA_TYPE_HIGH_PRIORITY'
timeoutPolicy = 'ARNETWORK_MANAGER_CALLBACK_RETURN_DATA_POP'
if cmd.timeoutPolicy == ArCmdTimeoutPolicy.POP:
timeoutPolicy = 'ARNETWORK_MANAGER_CALLBACK_RETURN_DATA_POP'
elif cmd.timeoutPolicy == ArCmdTimeoutPolicy.RETRY:
timeoutPolicy = 'ARNETWORK_MANAGER_CALLBACK_RETURN_RETRY'
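# Mapping used above: NON_ACK commands go to the non-acknowledged buffer and
# HIGH_PRIO to the high-priority one, everything else (including plain ACK) to the
# acknowledged buffer; RETRY commands use the retry timeout policy, all others pop
# the data on timeout.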
cFile.write (' error = ARCONTROLLER_Network_SendData (feature->privatePart->networkController, cmdBuffer, cmdSize, '+bufferType+', '+timeoutPolicy+', &netError);\n')
''' TODO: manage the sending error
cFile.write (' \n')
cFile.write (' if ((error != ARCONTROLLER_OK) || (netError != ARNETWORK_OK))\n')
cFile.write (' {\n')
cFile.write (' \n')
cFile.write (' }\n')
'''
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' return error;\n')
cFile.write ('}\n')
cFile.write ('\n')
if cmd.bufferType == ArCmdBufferType.NON_ACK:
cFile.write ('eARCONTROLLER_ERROR ' + setNAckFunctionName (feature, cmd)+' ('+className+' *feature')
for arg in cmd.args:
cFile.write (', ' + xmlToC (MODULE_ARCOMMANDS, feature, cmd, arg) + ' _' + arg.name)
cFile.write (')\n')
cFile.write ('{\n')
cFile.write (' // -- Set the parameters to send through the command <code>' + ARCapitalize (format_cmd_name(cmd)) + '</code> in project <code>' + ARCapitalize (get_ftr_old_name(feature)) + '</code> --\n')
cFile.write (' \n')
cFile.write (' eARCONTROLLER_ERROR error = ARCONTROLLER_OK;\n')
cFile.write (' \n')
cFile.write (' // Check parameters\n')
cFile.write (' if ((feature == NULL) ||\n')
cFile.write (' (feature->privatePart == NULL) ||\n')
cFile.write (' (feature->privatePart->'+structNAckName (cmd)+' == NULL))\n')
cFile.write (' {\n')
cFile.write (' error = ARCONTROLLER_ERROR_BAD_PARAMETER;\n')
cFile.write (' }\n')
cFile.write (' // No else: the parameter check sets error to ARCONTROLLER_ERROR_BAD_PARAMETER and stops the processing\n')
cFile.write (' \n')
cFile.write (' if (error == ARCONTROLLER_OK)\n')
cFile.write (' {\n')
for arg in cmd.args:
cFile.write (' feature->privatePart->'+structNAckName(cmd)+'->' + arg.name + ' = _'+arg.name+';\n')
cFile.write ('\n')
cFile.write (' '+nAckCbChange(feature, cmd)+' (feature);\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' return error;\n')
cFile.write ('}\n')
cFile.write ('\n')
cFile.write ('eARCONTROLLER_ERROR '+ sendNAckFunctionName (feature, cmd)+' ('+className+' *feature, u_int8_t *cmdBuffer, int32_t cmdBufferSize)\n')
cFile.write ('{\n')
cFile.write (' // -- Send the command <code>' + ARCapitalize (format_cmd_name(cmd)) + '</code> in project <code>' + ARCapitalize (get_ftr_old_name(feature)) + '</code> with the parameters set beforehand --\n')
cFile.write (' \n')
cFile.write (' eARCONTROLLER_ERROR error = ARCONTROLLER_OK;\n')
cFile.write (' eARCOMMANDS_GENERATOR_ERROR cmdError = ARCOMMANDS_GENERATOR_OK;\n')
cFile.write (' eARNETWORK_ERROR netError = ARNETWORK_OK;\n')
cFile.write (' int32_t cmdSize = 0;\n')
cFile.write (' \n')
cFile.write (' // Check parameters\n')
cFile.write (' if ((feature == NULL) ||\n')
cFile.write (' (feature->privatePart == NULL) ||\n')
cFile.write (' (feature->privatePart->'+structNAckName (cmd)+' == NULL) ||\n')
cFile.write (' (cmdBuffer == NULL))\n')
cFile.write (' {\n')
cFile.write (' error = ARCONTROLLER_ERROR_BAD_PARAMETER;\n')
cFile.write (' }\n')
cFile.write (' // No else: the parameter check sets error to ARCONTROLLER_ERROR_BAD_PARAMETER and stops the processing\n')
cFile.write (' \n')
cFile.write (' if (error == ARCONTROLLER_OK)\n')
cFile.write (' {\n')
cFile.write (' // Send ' + ARCapitalize(cmd.name) + ' command\n')
cFile.write (' cmdError = ARCOMMANDS_Generator_Generate' + ARCapitalize(get_ftr_old_name(feature)) + ARCapitalize(format_cmd_name(cmd)) + '(cmdBuffer, cmdBufferSize, &cmdSize')
for arg in cmd.args:
cFile.write (', feature->privatePart->'+structNAckName (cmd)+'->' + arg.name)
cFile.write(');\n')
cFile.write (' if (cmdError != ARCOMMANDS_GENERATOR_OK)\n')
cFile.write (' {\n')
cFile.write (' error = ARCONTROLLER_ERROR_COMMAND_GENERATING;\n')
cFile.write (' }\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' if (error == ARCONTROLLER_OK)\n')
cFile.write (' {\n')
cFile.write (' error = ARCONTROLLER_Network_SendData (feature->privatePart->networkController, cmdBuffer, cmdSize, ARCONTROLLER_NETWORK_SENDING_DATA_TYPE_NOT_ACK, ARNETWORK_MANAGER_CALLBACK_RETURN_DATA_POP, &netError);\n')
cFile.write (' if (netError != ARNETWORK_OK)\n')
cFile.write (' {\n')
cFile.write (' ARSAL_PRINT(ARSAL_PRINT_ERROR, ARCONTROLLER_FEATURE_TAG, "Network sending error : %s", ARNETWORK_Error_ToString (netError));\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' return error;\n')
cFile.write ('}\n')
cFile.write ('\n')
for arg in cmd.args:
cFile.write ('eARCONTROLLER_ERROR ' + setNAckFunctionName (feature, cmd, arg)+' ('+className+' *feature, ' + xmlToC (MODULE_ARCOMMANDS, feature, cmd, arg) + ' _'+ arg.name +')\n')
cFile.write ('{\n')
cFile.write (' // -- Set the '+arg.name+' for the command <code>' + ARCapitalize (format_cmd_name(cmd)) + '</code> in project <code>' + ARCapitalize (get_ftr_old_name(feature)) + '</code> --\n')
cFile.write (' \n')
cFile.write (' eARCONTROLLER_ERROR error = ARCONTROLLER_OK;\n')
cFile.write (' \n')
cFile.write (' // Check parameters\n')
cFile.write (' if ((feature == NULL) ||\n')
cFile.write (' (feature->privatePart == NULL) ||\n')
cFile.write (' (feature->privatePart->'+structNAckName (cmd)+' == NULL))\n')
cFile.write (' {\n')
cFile.write (' error = ARCONTROLLER_ERROR_BAD_PARAMETER;\n')
cFile.write (' }\n')
cFile.write (' // No else: the parameter check sets error to ARCONTROLLER_ERROR_BAD_PARAMETER and stops the processing\n')
cFile.write (' \n')
cFile.write (' if (error == ARCONTROLLER_OK)\n')
cFile.write (' {\n')
cFile.write (' feature->privatePart->'+structNAckName(cmd)+'->' + arg.name + ' = _'+arg.name+';\n')
cFile.write ('\n')
cFile.write (' '+nAckCbChange(feature, cmd)+' (feature);\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' return error;\n')
cFile.write ('}\n')
cFile.write ('\n')
for evt in msgs_without_multiset(feature.evts):
cFile.write ('void '+decodeCallback (feature, evt)+' (')
for arg in evt.args:
cFile.write (xmlToC (MODULE_ARCOMMANDS, feature, evt, arg, True) + ' _' + arg.name + ', ')
cFile.write ('void *customData)\n')
cFile.write ('{\n')
cFile.write (' // -- callback used when the command <code>' + ARCapitalize (format_cmd_name(evt)) + '</code> is decoded -- \n')
cFile.write (' \n')
cFile.write (' '+className+' *feature = ('+className+' *)customData;\n')
cFile.write (' eARCONTROLLER_ERROR error = ARCONTROLLER_OK;\n')
cFile.write (' int commandKey = '+defineNotification(feature, evt)+';\n')
#if not evt.isNotif: #TODO add
cFile.write (' '+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'COMMANDS')+' *dictCmdElement = NULL;\n')
cFile.write (' int isANewCommandElement = 0;\n')
cFile.write (' int elementAdded = 0;\n')
cFile.write (' '+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'ELEMENT')+' *newElement = NULL;\n')
if evt.listType == ArCmdListType.LIST:
cFile.write (' int listIndex = 0;\n')
if _LIST_FLAG in evt.argsByName:
list_flags = getGenericListFlagsEnum(ctx)
if _LIST_FLAG in evt.argsByName and evt.listType == ArCmdListType.MAP:
cFile.write (' int remove = (_'+evt.argsByName[_LIST_FLAG].name+' & ' + ARFlagValue (MODULE_ARCOMMANDS, 'generic', list_flags.name, 'remove')+');\n')
cFile.write (' int clear = (_'+evt.argsByName[_LIST_FLAG].name+' & (' + ARFlagValue (MODULE_ARCOMMANDS, 'generic', list_flags.name, 'first') +' | ' + ARFlagValue (MODULE_ARCOMMANDS, 'generic', list_flags.name, 'empty')+'));\n')
cFile.write (' int notify = (_'+evt.argsByName[_LIST_FLAG].name+' & (' + ARFlagValue (MODULE_ARCOMMANDS, 'generic', list_flags.name, 'last') +' | ' + ARFlagValue (MODULE_ARCOMMANDS, 'generic', list_flags.name, 'empty')+'));\n')
cFile.write (' int add = !(_'+evt.argsByName[_LIST_FLAG].name+' & (' + ARFlagValue (MODULE_ARCOMMANDS, 'generic', list_flags.name, 'remove') +' | ' + ARFlagValue (MODULE_ARCOMMANDS, 'generic', list_flags.name, 'empty')+'));\n')
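# List-flag semantics encoded above for MAP events: "remove" drops a single element,
# "first" or "empty" clears the whole command entry, "last" or "empty" triggers the
# notification callbacks, and an element is added only when neither "remove" nor
# "empty" is set.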
cFile.write ('\n')
if evt.listType == ArCmdListType.MAP:
cFile.write (' ARCONTROLLER_DICTIONARY_ELEMENT_t *dictElement = NULL;\n')
if not evt.mapKey.argType == ArArgType.STRING:
cFile.write (' int elementKeyLength = 0;\n')
cFile.write (' char *elementKey = NULL;\n')
cFile.write ('\n')
cFile.write (' // Check parameters\n')
cFile.write (' if ((feature == NULL) || (feature->privatePart == NULL))\n')
cFile.write (' {\n')
cFile.write (' error = ARCONTROLLER_ERROR_BAD_PARAMETER;\n')
cFile.write (' }\n')
cFile.write (' // No else: the parameter check sets error to ARCONTROLLER_ERROR_BAD_PARAMETER and stops the processing\n')
cFile.write (' \n')
if _LIST_FLAG in evt.argsByName:
cFile.write (' if (error == ARCONTROLLER_OK)\n')
cFile.write (' {\n')
cFile.write (' // Find command elements\n')
cFile.write (' ARSAL_Mutex_Lock (&(feature->privatePart->mutex));\n')
cFile.write (' HASH_FIND_INT (feature->privatePart->dictionary, &commandKey, dictCmdElement);\n')
cFile.write (' ARSAL_Mutex_Unlock (&(feature->privatePart->mutex));\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' if ((error == ARCONTROLLER_OK) && (dictCmdElement != NULL) && (clear))\n')
cFile.write (' {\n')
cFile.write (' //Delete the command\n')
cFile.write (' ARSAL_Mutex_Lock (&(feature->privatePart->mutex));\n')
cFile.write (' HASH_DEL (feature->privatePart->dictionary, dictCmdElement);\n')
cFile.write (' ' + ARFunctionName (MODULE_ARCONTROLLER, 'feature', 'DeleteCommandsElement')+'(&dictCmdElement);\n')
cFile.write (' ARSAL_Mutex_Unlock (&(feature->privatePart->mutex));\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' if (error == ARCONTROLLER_OK)\n')
cFile.write (' {\n')
if evt.listType == ArCmdListType.MAP:
cFile.write (' if ((remove) && (dictCmdElement != NULL))\n')
cFile.write (' {\n')
cFile.write (' //remove element\n')
cFile.write (' ARSAL_Mutex_Lock (&(feature->privatePart->mutex));\n\n')
if not evt.mapKey.argType == ArArgType.STRING:
cFile.write (' elementKeyLength = snprintf (NULL, 0, '+xmlToFormat(evt.mapKey)+', _'+evt.mapKey.name+');\n')
cFile.write (' elementKey = malloc (elementKeyLength + 1);\n')
cFile.write (' if (elementKey != NULL)\n')
cFile.write (' {\n')
cFile.write (' snprintf (elementKey, (elementKeyLength + 1), '+xmlToFormat(evt.mapKey)+', _'+evt.mapKey.name+');\n')
cFile.write (' HASH_FIND_STR (dictCmdElement->elements, elementKey, dictElement);\n')
cFile.write (' }\n')
else:
cFile.write (' HASH_FIND_STR (dictCmdElement->elements, _'+evt.mapKey.name+', dictElement);\n')
cFile.write (' if (dictElement != NULL)\n')
cFile.write (' {\n')
cFile.write (' HASH_DEL (dictCmdElement->elements, dictElement);\n')
cFile.write (' ARCONTROLLER_Feature_DeleteElement (&dictElement);\n')
cFile.write (' }\n')
if not evt.mapKey.argType == ArArgType.STRING:
cFile.write (' /* cleanup */\n')
cFile.write (' free (elementKey);\n')
cFile.write (' elementKey = NULL;\n\n')
cFile.write (' ARSAL_Mutex_Unlock (&(feature->privatePart->mutex));\n')
cFile.write (' // force notifying when removing because the Mambo does not send last when removing a usbAccessory\n')
cFile.write (' notify = 1;\n')
cFile.write (' }\n')
cFile.write ('\n')
cFile.write (' if (add)\n')
cFile.write (' {\n')
#if not evt.isNotif: #TODO add
cFile.write (' if (dictCmdElement == NULL)\n')
cFile.write (' {\n')
cFile.write (' // New command element\n')
cFile.write (' isANewCommandElement = 1;\n')
cFile.write (' dictCmdElement = ' + ARFunctionName (MODULE_ARCONTROLLER, 'feature', 'NewCommandsElement')+' (commandKey, &error);\n')
cFile.write (' }\n')
cFile.write ('            // No else: commandElement already exists.\n')
cFile.write (' \n')
#if not evt.isNotif: #TODO add
if evt.listType == ArCmdListType.LIST:
cFile.write (' if (error == ARCONTROLLER_OK)\n')
cFile.write (' {\n')
cFile.write (' ARSAL_Mutex_Lock (&(feature->privatePart->mutex));\n')
cFile.write (' listIndex = HASH_COUNT (dictCmdElement->elements);\n')
cFile.write (' ARSAL_Mutex_Unlock (&(feature->privatePart->mutex));\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' //Create new element\n')
cFile.write (' newElement = '+ ARFunctionName (MODULE_ARCONTROLLER, get_ftr_old_name(feature), 'newCmdElement'+ARCapitalize(format_cmd_name(evt)))+' (feature,')
for arg in evt.args:
cFile.write (' _' + arg.name + ', ')
if evt.listType == ArCmdListType.LIST:
cFile.write ('listIndex, ')
cFile.write ('&error);\n')
cFile.write (' \n')
#if not evt.isNotif: #TODO add
cFile.write (' //Set new element in CommandElements \n')
cFile.write (' if (error == ARCONTROLLER_OK)\n')
cFile.write (' {\n')
cFile.write (' ARSAL_Mutex_Lock (&(feature->privatePart->mutex));\n')
cFile.write (' \n')
cFile.write (' ' + ARFunctionName (MODULE_ARCONTROLLER, 'feature', 'AddElement')+' (&(dictCmdElement->elements), newElement);\n')
cFile.write (' \n')
cFile.write (' //Add new commandElement if necessary\n')
cFile.write (' if (isANewCommandElement)\n')
cFile.write (' {\n')
cFile.write (' HASH_ADD_INT (feature->privatePart->dictionary, command, dictCmdElement);\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' elementAdded = 1;\n')
cFile.write (' \n')
cFile.write (' ARSAL_Mutex_Unlock (&(feature->privatePart->mutex));\n')
cFile.write (' }\n')
cFile.write (' }\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' if ((error == ARCONTROLLER_OK) && (notify))\n')
cFile.write (' {\n')
cFile.write (' // Notification Callback\n')
cFile.write (' if (dictCmdElement != NULL) {\n')
cFile.write (' error = ARCONTROLLER_Dictionary_Notify (feature->privatePart->commandCallbacks, dictCmdElement->command, dictCmdElement->elements);\n')
cFile.write (' } else {\n')
cFile.write (' error = ARCONTROLLER_Dictionary_Notify (feature->privatePart->commandCallbacks, commandKey, NULL);\n')
cFile.write (' }\n')
cFile.write (' }\n')
cFile.write (' \n')
else:
#if not evt.isNotif: #TODO add
cFile.write (' if (error == ARCONTROLLER_OK)\n')
cFile.write (' {\n')
cFile.write (' // Find command elements\n')
cFile.write (' ARSAL_Mutex_Lock (&(feature->privatePart->mutex));\n')
cFile.write (' HASH_FIND_INT (feature->privatePart->dictionary, &commandKey, dictCmdElement);\n')
cFile.write (' ARSAL_Mutex_Unlock (&(feature->privatePart->mutex));\n')
cFile.write (' \n')
cFile.write (' if (dictCmdElement == NULL)\n')
cFile.write (' {\n')
cFile.write (' // New command element\n')
cFile.write (' isANewCommandElement = 1;\n')
cFile.write (' dictCmdElement = ' + ARFunctionName (MODULE_ARCONTROLLER, 'feature', 'NewCommandsElement')+' (commandKey, &error);\n')
cFile.write (' }\n')
cFile.write (' // No Else ; commandElement already exists.\n')
cFile.write (' \n')
cFile.write (' }\n')
cFile.write (' \n')
#if not evt.isNotif: #TODO add
if evt.listType == ArCmdListType.LIST:
cFile.write (' if (error == ARCONTROLLER_OK)\n')
cFile.write (' {\n')
cFile.write (' listIndex = HASH_COUNT (dictCmdElement->elements);\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' if (error == ARCONTROLLER_OK)\n')
cFile.write (' {\n')
cFile.write (' //Create new element\n')
cFile.write (' newElement = '+ ARFunctionName (MODULE_ARCONTROLLER, get_ftr_old_name(feature), 'newCmdElement'+ARCapitalize(format_cmd_name(evt)))+' (feature, ')
for arg in evt.args:
cFile.write (' _' + arg.name + ', ')
if evt.listType == ArCmdListType.LIST:
cFile.write ('listIndex, ')
cFile.write ('&error);\n')
cFile.write (' }\n')
cFile.write (' \n')
#if not evt.isNotif: #TODO add
cFile.write (' //Set new element in CommandElements \n')
cFile.write (' if (error == ARCONTROLLER_OK)\n')
cFile.write (' {\n')
cFile.write (' ARSAL_Mutex_Lock (&(feature->privatePart->mutex));\n')
cFile.write (' \n')
cFile.write (' ' + ARFunctionName (MODULE_ARCONTROLLER, 'feature', 'AddElement')+' (&(dictCmdElement->elements), newElement);\n')
cFile.write (' \n')
cFile.write (' //Add new commandElement if necessary\n')
cFile.write (' if (isANewCommandElement)\n')
cFile.write (' {\n')
cFile.write (' HASH_ADD_INT (feature->privatePart->dictionary, command, dictCmdElement);\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' elementAdded = 1;\n')
cFile.write (' \n')
cFile.write (' ARSAL_Mutex_Unlock (&(feature->privatePart->mutex));\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' if (error == ARCONTROLLER_OK)\n')
cFile.write (' {\n')
cFile.write (' // Notification Callback\n')
cFile.write (' error = ARCONTROLLER_Dictionary_Notify (feature->privatePart->commandCallbacks, dictCmdElement->command, dictCmdElement->elements);\n')
cFile.write (' }\n')
cFile.write (' \n')
#TODO sup new element notif
#if not evt.isNotif: #TODO add
cFile.write (' // if an error occurred \n')
cFile.write (' if (error != ARCONTROLLER_OK)\n')
cFile.write (' {\n')
cFile.write (' // cleanup\n')
#cFile.write (' if ((dictCmdElement != NULL) && (!elementAdded ))\n')
cFile.write (' if ((dictCmdElement != NULL) && (isANewCommandElement))\n')
cFile.write (' {\n')
cFile.write (' ' + ARFunctionName (MODULE_ARCONTROLLER, 'feature', 'DeleteCommandsElement')+'(&dictCmdElement);\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' if ((newElement != NULL) && (!elementAdded ))\n')
cFile.write (' {\n')
cFile.write (' ' + ARFunctionName (MODULE_ARCONTROLLER, 'feature', 'DeleteElement')+' (&newElement);\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write ('}\n')
cFile.write ('\n')
for evt in msgs_with_multiset(feature.evts):
cFile.write ('void '+decodeCallback (feature, evt)+' (')
for arg in evt.args:
cFile.write (xmlToC (MODULE_ARCOMMANDS, feature, evt, arg, True) + ' _' + arg.name + ', ')
cFile.write ('void *customData)\n')
cFile.write ('{\n')
cFile.write (' // -- callback used when the command <code>' + ARCapitalize (format_cmd_name(evt)) + '</code> is decoded -- \n')
cFile.write (' \n')
cFile.write (' '+className+' *feature = ('+className+' *)customData;\n')
cFile.write (' ARCOMMANDS_Decoder_Decode'+ARCapitalize(feature.name)+ARCapitalize(evt.name)+' (feature->privatePart->networkController->decoder, _'+arg.name+');\n')
cFile.write ('}\n')
cFile.write ('\n')
for evt in msgs_without_multiset(feature.evts):
cFile.write (''+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'ELEMENT')+' *'+ ARFunctionName (MODULE_ARCONTROLLER, get_ftr_old_name(feature), 'newCmdElement'+ARCapitalize(format_cmd_name(evt)))+' ('+className+' *feature, ')
for arg in evt.args:
cFile.write (xmlToC (MODULE_ARCOMMANDS, feature, evt, arg, True) + ' _' + arg.name + ', ')
if evt.listType == ArCmdListType.LIST:
cFile.write ('int listIndex, ')
cFile.write ('eARCONTROLLER_ERROR *error)\n')
cFile.write ('{\n')
cFile.write (' // -- Create element of an event '+ARCapitalize(format_cmd_name(evt))+' -- \n')
cFile.write (' \n')
cFile.write (' '+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'ELEMENT')+' *newElement = NULL;\n')
cFile.write (' int elementKeyLength = 0;\n')
if evt.args:
cFile.write (' '+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'ARG')+' *argDictNewElement = NULL;\n')
if [ a for a in evt.args if a.argType == ArArgType.STRING ]:
cFile.write (' int strLength = 0;\n')
cFile.write (' eARCONTROLLER_ERROR localError = ARCONTROLLER_OK;\n')
cFile.write (' \n')
cFile.write (' // Check parameters\n')
cFile.write (' if ((feature == NULL) || (feature->privatePart == NULL))\n')
cFile.write (' {\n')
cFile.write (' localError = ARCONTROLLER_ERROR_BAD_PARAMETER;\n')
cFile.write (' }\n')
cFile.write (' // No Else: the parameter check sets localError to ARCONTROLLER_ERROR_BAD_PARAMETER and stops the processing\n')
cFile.write (' \n')
cFile.write (' //Create Element Dictionary\n')
cFile.write (' if (localError == ARCONTROLLER_OK)\n')
cFile.write (' {\n')
cFile.write (' // New element\n')
cFile.write (' newElement = malloc (sizeof('+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'ELEMENT')+'));\n')
cFile.write (' if (newElement != NULL)\n')
cFile.write (' {\n')
cFile.write (' newElement->key = NULL;\n')
cFile.write (' newElement->arguments = NULL;\n')
cFile.write (' }\n')
cFile.write (' else\n')
cFile.write (' {\n')
cFile.write (' localError = ARCONTROLLER_ERROR_ALLOC;\n')
cFile.write (' }\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' if (localError == ARCONTROLLER_OK)\n')
cFile.write (' {\n')
if evt.listType == ArCmdListType.LIST:
cFile.write (' ARSAL_Mutex_Lock (&(feature->privatePart->mutex));\n')
cFile.write (' \n')
cFile.write (' //Alloc Element Key\n')
if evt.listType == ArCmdListType.MAP:
if evt.mapKey.argType == ArArgType.STRING:
cFile.write (' elementKeyLength = strlen (_'+evt.mapKey.name+');\n')
else:
cFile.write (' elementKeyLength = snprintf (NULL, 0, '+xmlToFormat(evt.mapKey)+', _'+evt.mapKey.name+');\n')
elif evt.listType == ArCmdListType.LIST:
cFile.write (' elementKeyLength = snprintf (NULL, 0, "%d", listIndex);\n')
elif evt.listType == ArCmdListType.NONE:
cFile.write (' elementKeyLength = strlen (ARCONTROLLER_DICTIONARY_SINGLE_KEY);\n')
cFile.write (' newElement->key = malloc (elementKeyLength + 1);\n')
cFile.write (' if (newElement->key != NULL)\n')
cFile.write (' {\n')
if evt.listType == ArCmdListType.MAP:
if evt.mapKey.argType == ArArgType.STRING:
cFile.write (' strncpy (newElement->key, _'+evt.mapKey.name+', (elementKeyLength + 1));\n')
else:
cFile.write (' snprintf (newElement->key, (elementKeyLength + 1), '+xmlToFormat(evt.mapKey)+', _'+evt.mapKey.name+');\n')
elif evt.listType == ArCmdListType.LIST:
cFile.write (' snprintf (newElement->key, (elementKeyLength + 1), "%d", listIndex);\n')
elif evt.listType == ArCmdListType.NONE:
cFile.write (' strncpy (newElement->key, ARCONTROLLER_DICTIONARY_SINGLE_KEY, (elementKeyLength + 1));\n')
cFile.write (' newElement->key[elementKeyLength] = \'\\0\';\n')
cFile.write (' }\n')
cFile.write (' else\n')
cFile.write (' {\n')
cFile.write (' localError = ARCONTROLLER_ERROR_ALLOC;\n')
cFile.write (' }\n')
if evt.listType == ArCmdListType.LIST:
cFile.write (' \n')
cFile.write (' ARSAL_Mutex_Unlock (&(feature->privatePart->mutex));\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' //Create argument Dictionary\n')
for arg in [arg1 for arg1 in evt.args if arg1.name != _LIST_FLAG]:
cFile.write (' //Add argument To the element\n')
cFile.write (' if (localError == ARCONTROLLER_OK)\n')
cFile.write (' {\n')
cFile.write (' // New argument element\n')
cFile.write (' argDictNewElement = malloc (sizeof('+ARTypeName(MODULE_ARCONTROLLER, 'DICTIONARY', 'ARG')+'));\n')
cFile.write (' if (argDictNewElement != NULL)\n')
cFile.write (' {\n')
if isinstance(arg.argType, ArEnum):
cFile.write (' argDictNewElement->valueType = '+AREnumValue(MODULE_ARCONTROLLER, 'DICTIONARY', 'VALUE_TYPE', 'ENUM')+';\n')
elif isinstance(arg.argType, ArBitfield):
cFile.write (' argDictNewElement->valueType = '+AREnumValue(MODULE_ARCONTROLLER, 'DICTIONARY', 'VALUE_TYPE', ArArgType.TO_STRING[arg.argType.btfType])+';\n')
else:
cFile.write (' argDictNewElement->valueType = '+AREnumValue(MODULE_ARCONTROLLER, 'DICTIONARY', 'VALUE_TYPE', ArArgType.TO_STRING[arg.argType])+';\n')
cFile.write (' argDictNewElement->argument = '+defineNotification(feature, evt, arg)+';\n')
if arg.argType == ArArgType.STRING:
cFile.write (' strLength = strlen (_'+arg.name+');\n')
cFile.write (' argDictNewElement->value.'+ARCapitalize(ArArgType.TO_STRING[arg.argType])+' = malloc (strLength + 1);\n')
cFile.write (' if (argDictNewElement->value.'+ARCapitalize(ArArgType.TO_STRING[arg.argType])+' != NULL)\n')
cFile.write (' {\n')
cFile.write (' strncpy (argDictNewElement->value.'+ARCapitalize(ArArgType.TO_STRING[arg.argType])+', _'+arg.name+', strLength);\n')
cFile.write (' argDictNewElement->value.'+ARCapitalize(ArArgType.TO_STRING[arg.argType])+'[strLength] = \'\\0\';\n')
cFile.write (' }\n')
cFile.write (' else\n')
cFile.write (' {\n')
cFile.write (' localError = ARCONTROLLER_ERROR_ALLOC;\n')
cFile.write (' }\n')
elif isinstance(arg.argType, ArEnum):
cFile.write (' argDictNewElement->value.'+ARCapitalize('i32')+' = _'+arg.name+';\n')
elif isinstance(arg.argType, ArBitfield):
cFile.write (' argDictNewElement->value.'+ARCapitalize(ArArgType.TO_STRING[arg.argType.btfType])+' = _'+arg.name+';\n')
else:
cFile.write (' argDictNewElement->value.'+ARCapitalize(ArArgType.TO_STRING[arg.argType])+' = _'+arg.name+';\n')
cFile.write (' \n')
if arg.argType == ArArgType.STRING:
cFile.write (' if (localError == ARCONTROLLER_OK)\n')
cFile.write (' {\n')
cFile.write (' HASH_ADD_KEYPTR (hh, newElement->arguments, argDictNewElement->argument, strlen(argDictNewElement->argument), argDictNewElement);\n')
cFile.write (' }\n')
else:
cFile.write (' HASH_ADD_KEYPTR (hh, newElement->arguments, argDictNewElement->argument, strlen(argDictNewElement->argument), argDictNewElement);\n')
cFile.write (' }\n')
cFile.write (' else\n')
cFile.write (' {\n')
cFile.write (' localError = ARCONTROLLER_ERROR_ALLOC;\n')
cFile.write (' }\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' // If an error occurred \n')
cFile.write (' if (localError != ARCONTROLLER_OK)\n')
cFile.write (' {\n')
cFile.write (' // cleanup\n')
cFile.write (' if (newElement != NULL)\n')
cFile.write (' {\n')
cFile.write (' if (newElement->arguments != NULL)\n')
cFile.write (' {\n')
for arg in evt.args:
if arg.argType == ArArgType.STRING:
cFile.write (' if (newElement->arguments->value.'+ARCapitalize(ArArgType.TO_STRING[arg.argType])+' != NULL)\n')
cFile.write (' {\n')
cFile.write (' free(newElement->arguments->value.'+ARCapitalize(ArArgType.TO_STRING[arg.argType])+');\n')
cFile.write (' newElement->arguments->value.'+ARCapitalize(ArArgType.TO_STRING[arg.argType])+' = NULL;\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' free (newElement->arguments);\n')
cFile.write (' newElement->arguments = NULL;\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' if (newElement->key != NULL)\n')
cFile.write (' {\n')
cFile.write (' free (newElement->key);\n')
cFile.write (' newElement->key = NULL;\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' free (newElement);\n')
cFile.write (' newElement = NULL;\n')
cFile.write (' }\n')
if evt.args:
cFile.write ('\n')
cFile.write (' free (argDictNewElement);\n')
cFile.write (' argDictNewElement = NULL;\n')
cFile.write (' }\n')
cFile.write (' // Return the error\n')
cFile.write (' if (error != NULL)\n')
cFile.write (' {\n')
cFile.write (' *error = localError;\n')
cFile.write (' }\n')
cFile.write (' // No else: error is not returned \n')
cFile.write (' \n')
cFile.write (' return newElement;\n')
cFile.write ('}\n')
cFile.write ('\n')
cFile.write ('eARCONTROLLER_ERROR '+ARFunctionName(MODULE_FEATURE, get_ftr_old_name(feature), 'SetNetworkController')+' ('+className+' *feature, ARCONTROLLER_Network_t *networkController)\n')
cFile.write ('{\n')
cFile.write (' // -- Set a NetworkController to use to send commands. --\n')
cFile.write (' \n')
cFile.write (' eARCONTROLLER_ERROR error = ARCONTROLLER_OK;\n')
cFile.write (' \n')
cFile.write (' // Check parameters\n')
cFile.write (' if ((feature == NULL) || (feature->privatePart == NULL))\n')
cFile.write (' {\n')
cFile.write (' error = ARCONTROLLER_ERROR_BAD_PARAMETER;\n')
cFile.write (' }\n')
cFile.write (' // No Else: the parameter check sets error to ARCONTROLLER_ERROR_BAD_PARAMETER and stops the processing\n')
cFile.write (' \n')
cFile.write (' if (error == ARCONTROLLER_OK)\n')
cFile.write (' {\n')
cFile.write (' feature->privatePart->networkController = networkController;\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' return error;\n')
cFile.write ('}\n')
cFile.write ('\n')
cFile.write ('ARCONTROLLER_DICTIONARY_ELEMENT_t *' + ARFunctionName (MODULE_ARCONTROLLER, get_ftr_old_name(feature), 'GetCommandElements')+' ('+className+' *feature, '+defineNotificationDef()+' commandKey, eARCONTROLLER_ERROR *error)\n')
cFile.write ('{\n')
cFile.write (' // -- Get Command Arguments --\n')
cFile.write (' \n')
cFile.write (' eARCONTROLLER_ERROR localError = ARCONTROLLER_OK;\n')
cFile.write (' ARCONTROLLER_DICTIONARY_COMMANDS_t *commandDic = NULL;\n')
cFile.write (' ARCONTROLLER_DICTIONARY_ELEMENT_t *elements = NULL;\n')
cFile.write (' \n')
cFile.write (' // Check parameters\n')
cFile.write (' if ((feature == NULL) ||\n')
cFile.write (' (feature->privatePart == NULL))\n')
cFile.write (' {\n')
cFile.write (' localError = ARCONTROLLER_ERROR_BAD_PARAMETER;\n')
cFile.write (' }\n')
cFile.write (' // No Else: the parameter check sets localError to ARCONTROLLER_ERROR_BAD_PARAMETER and stops the processing\n')
cFile.write (' \n')
cFile.write (' if (localError == ARCONTROLLER_OK)\n')
cFile.write (' {\n')
cFile.write (' ARSAL_Mutex_Lock (&(feature->privatePart->mutex));\n')
cFile.write (' \n')
cFile.write (' // Find elements\n')
cFile.write (' HASH_FIND_INT (feature->privatePart->dictionary, &(commandKey), commandDic);\n')
cFile.write (' if (commandDic != NULL)\n')
cFile.write (' {\n')
cFile.write (' elements = commandDic->elements;\n')
cFile.write (' }\n')
cFile.write (' // NO Else ; Command not found \n')
cFile.write (' \n')
cFile.write (' ARSAL_Mutex_Unlock (&(feature->privatePart->mutex));\n')
cFile.write (' \n')
cFile.write (' if (elements == NULL)\n')
cFile.write (' {\n')
cFile.write (' localError = ARCONTROLLER_ERROR_NO_ELEMENT;\n')
cFile.write (' }\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' // Return the error\n')
cFile.write (' if (error != NULL)\n')
cFile.write (' {\n')
cFile.write (' *error = localError;\n')
cFile.write (' }\n')
cFile.write (' // No else: error is not returned \n')
cFile.write (' \n')
cFile.write (' return elements;\n')
cFile.write ('}\n')
cFile.write ('\n')
cFile.write ('/************************\n')
cFile.write (' * Private Implementation\n')
cFile.write (' *************************/\n')
cFile.close () # note: automake picks up all source files in this folder
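# A minimal Python sketch (illustrative only, not used by the generator) of the
# element-key policy the generated newCmdElement C functions implement above:
# MAP events key elements on the map argument, LIST events key on the running
# list index, and plain events share ARCONTROLLER_DICTIONARY_SINGLE_KEY.
def _element_key_sketch (listType, mapValue=None, listIndex=None):
    if listType == ArCmdListType.MAP:
        return str (mapValue)   # key derived from the map argument
    elif listType == ArCmdListType.LIST:
        return str (listIndex)  # key derived from the element index in the list
    else:
        return 'ARCONTROLLER_DICTIONARY_SINGLE_KEY'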
def generateFeatureControllersJava (ctx, JNI_JAVA_DIR):
#########################################
# Write Device controller JNI java file #
#########################################
for feature in ctx.features:
className = 'ARFeature'+ ARCapitalize(get_ftr_old_name(feature))
classPrivateName = ARTypeName (MODULE_ARCONTROLLER, 'device', 'private')
fileName = className+'.java'
filepath = JNI_JAVA_DIR + fileName
jfile = open (filepath, 'w')
jfile.write ('/**********************************************************\n')
jfile.write (' * AUTOGENERATED FILE *\n')
jfile.write (' * DO NOT MODIFY IT *\n')
jfile.write (' * *\n')
jfile.write (' * To add new commands : *\n')
jfile.write (' * - Modify ../Xml/commands.xml file *\n')
jfile.write (' * - Re-run generateDeviceControllers.py script *\n')
jfile.write (' * *\n')
jfile.write (' **********************************************************/\n')
jfile.write ('\n')
jfile.write ('/**\n')
jfile.write (' * @file '+fileName+'\n')
jfile.write (' * @brief Feature controller allowing to send commands related to the '+get_ftr_old_name(feature)+' feature.\n')
jfile.write (' * ' + feature.doc.replace('\n', '\n * ')+'\n')
jfile.write (' */\n')
jfile.write ('package com.parrot.arsdk.arcontroller;\n')
jfile.write ('\n')
jfile.write ('import com.parrot.arsdk.arsal.ARSALPrint;\n')
jfile.write ('import com.parrot.arsdk.arcommands.*;\n')
jfile.write ('import com.parrot.arsdk.ardiscovery.ARDiscoveryDevice;\n')
jfile.write ('\n')
jfile.write ('import java.util.List;\n')
jfile.write ('import java.util.ArrayList;\n')
jfile.write ('\n')
jfile.write ('public class '+className+'\n')
jfile.write ('{\n')
jfile.write (' private static String TAG = "'+className+'";\n')
jfile.write (' \n')
for evt in feature.evts:
for arg in [arg1 for arg1 in evt.args if arg1.name != _LIST_FLAG]:
jfile.write (' public static String ' + defineNotification(feature, evt, arg) + ' = ""; /**< Key of the argument <code>'+arg.name+'</code> of event <code>' + ARCapitalize (format_cmd_name(evt)) + '</code> in feature <code>' + ARCapitalize (get_ftr_old_name(feature)) + '</code> */\n')
jfile.write ('\n')
for evt in feature.evts:
for arg in evt.args:
jfile.write (' private static native String ' + nativeGetNotificationVal(feature, evt, arg) + ' ();\n')
jfile.write ('\n')
for cmd in feature.cmds:
jfile.write (' private native int '+nativeSendingFunction(cmd)+' (long jFeature')
for arg in cmd.args:
if isinstance(arg.argType, ArEnum):
jfile.write (', int ' + arg.name + '')
elif isinstance(arg.argType, ArMultiSetting):
for multiset_msg in arg.argType.msgs:
jfile.write (', int '+ARCapitalize(multiset_msg.ftr.name)+ARCapitalize(multiset_msg.name)+'IsSet')
for multiset_msg_arg in multiset_msg.args:
jfile.write (', ' + xmlToJava (LIB_MODULE, multiset_msg.ftr, multiset_msg, multiset_msg_arg) + ' '+ARUncapitalize(multiset_msg.ftr.name)+ARCapitalize(multiset_msg.name)+ARCapitalize(multiset_msg_arg.name)+'')
else:
jfile.write (', ' + xmlToJava (MODULE_ARCOMMANDS, feature, cmd, arg) + ' ' + arg.name + '')
jfile.write (');\n')
if cmd.bufferType == ArCmdBufferType.NON_ACK:
jfile.write (' private native int '+nativeSetNAckFunction(cmd)+' (long jFeature')
for arg in cmd.args:
if isinstance(arg.argType, ArEnum):
jfile.write (', int ' + arg.name + '')
else:
jfile.write (', ' + xmlToJava (MODULE_ARCOMMANDS, feature, cmd, arg) + ' ' + arg.name + '')
jfile.write (');\n')
for arg in cmd.args:
if isinstance(arg.argType, ArEnum):
jfile.write (' private native int '+nativeSetNAckFunction(cmd, arg)+' (long jFeature, int ' + arg.name + ');\n')
else:
jfile.write (' private native int '+nativeSetNAckFunction(cmd, arg)+' (long jFeature, ' + xmlToJava (MODULE_ARCOMMANDS, feature, cmd, arg) + ' ' + arg.name + ');\n')
jfile.write ('\n')
jfile.write (' private long jniFeature;\n')
jfile.write (' private boolean initOk;\n')
jfile.write (' \n')
jfile.write (' static\n')
jfile.write (' {\n')
for evt in feature.evts:
for arg in [arg1 for arg1 in evt.args if arg1.name != _LIST_FLAG]:
jfile.write (' ' + defineNotification(feature, evt, arg) + ' = '+ nativeGetNotificationVal(feature, evt, arg) + ' ();\n')
jfile.write (' }\n')
jfile.write (' \n')
jfile.write (' /**\n')
jfile.write (' * Constructor\n')
jfile.write (' */\n')
jfile.write (' public '+className+' (long nativeFeature)\n')
jfile.write (' {\n')
jfile.write (' initOk = false;\n')
jfile.write (' \n')
jfile.write (' if (nativeFeature != 0)\n')
jfile.write (' {\n')
jfile.write (' jniFeature = nativeFeature;\n')
jfile.write (' initOk = true;\n')
jfile.write (' }\n')
jfile.write (' }\n')
jfile.write ('\n')
jfile.write (' /**\n')
jfile.write (' * Dispose\n')
jfile.write (' */\n')
jfile.write (' public void dispose()\n')
jfile.write (' {\n')
jfile.write (' ARCONTROLLER_ERROR_ENUM error = ARCONTROLLER_ERROR_ENUM.ARCONTROLLER_OK;\n')
jfile.write (' synchronized (this)\n')
jfile.write (' {\n')
jfile.write (' if(initOk == true)\n')
jfile.write (' {\n')
jfile.write (' jniFeature = 0;\n')
jfile.write (' initOk = false;\n')
jfile.write (' }\n')
jfile.write (' }\n')
jfile.write (' }\n')
jfile.write ('\n')
jfile.write (' /**\n')
jfile.write (' * Destructor\n')
jfile.write (' */\n')
jfile.write (' public void finalize () throws Throwable\n')
jfile.write (' {\n')
jfile.write (' try\n')
jfile.write (' {\n')
jfile.write (' dispose ();\n')
jfile.write (' }\n')
jfile.write (' finally\n')
jfile.write (' {\n')
jfile.write (' super.finalize ();\n')
jfile.write (' }\n')
jfile.write (' }\n')
jfile.write (' \n')
for cmd in feature.cmds:
jfile.write (' /**\n')
jfile.write (' * Send a command <code>' + ARCapitalize (format_cmd_name(cmd)) + '</code>\n')
if cmd.isDeprecated:
jfile.write (' * @deprecated\n')
jfile.write (' * ' + cmd.doc.desc.replace('\n', '\n * ')+'\n')
for arg in cmd.args:
jfile.write (' * @param ' + arg.name + ' ' + get_arg_doc(arg).replace('\n', ' ') + '\n')
jfile.write (' * @return executing error\n')
jfile.write (' */\n')
jfile.write (' public ARCONTROLLER_ERROR_ENUM '+sendingFunction (cmd)+' (')
first = True
for arg in cmd.args:
if first:
first = False
else:
jfile.write (', ')
jfile.write (xmlToJava (MODULE_ARCOMMANDS, feature, cmd, arg) + ' _' + arg.name + '')
jfile.write (')\n')
jfile.write (' {\n')
jfile.write (' ARCONTROLLER_ERROR_ENUM error = ARCONTROLLER_ERROR_ENUM.ARCONTROLLER_OK;\n')
jfile.write (' synchronized (this)\n')
jfile.write (' {\n')
jfile.write (' if(initOk == true)\n')
jfile.write (' {\n')
jfile.write (' int nativeError = '+nativeSendingFunction(cmd)+' (jniFeature')
for arg in cmd.args:
if isinstance(arg.argType, ArEnum):
jfile.write (', _' + arg.name + '.getValue()')
elif isinstance(arg.argType, ArMultiSetting):
for multiset_msg in arg.argType.msgs:
jfile.write (', _' + arg.name + '.get'+ARCapitalize(multiset_msg.ftr.name)+ARCapitalize(multiset_msg.name)+'IsSet()')
for multiset_msg_arg in multiset_msg.args:
jfile.write (', _' + arg.name + '.get'+ARCapitalize(multiset_msg.ftr.name)+ARCapitalize(multiset_msg.name)+ARCapitalize(multiset_msg_arg.name)+'()')
else:
jfile.write (', _' + arg.name)
jfile.write (');\n')
jfile.write (' error = ARCONTROLLER_ERROR_ENUM.getFromValue(nativeError);\n')
jfile.write (' }\n')
jfile.write (' }\n')
jfile.write (' return error;\n')
jfile.write (' }\n')
jfile.write (' \n')
if cmd.bufferType == ArCmdBufferType.NON_ACK:
jfile.write (' public ARCONTROLLER_ERROR_ENUM '+javaSetNAckFunction (cmd)+' (')
isFirst = True
for arg in cmd.args:
if isFirst:
isFirst = False
else:
jfile.write (', ')
jfile.write (xmlToJava (MODULE_ARCOMMANDS, feature, cmd, arg) + ' _' + arg.name)
jfile.write (')\n')
jfile.write (' {\n')
jfile.write (' ARCONTROLLER_ERROR_ENUM error = ARCONTROLLER_ERROR_ENUM.ARCONTROLLER_OK;\n')
jfile.write (' synchronized (this)\n')
jfile.write (' {\n')
jfile.write (' if(initOk == true)\n')
jfile.write (' {\n')
jfile.write (' int nativeError = '+nativeSetNAckFunction(cmd)+' (jniFeature')
for arg in cmd.args:
jfile.write (', _' + arg.name)
if isinstance(arg.argType, ArEnum):
jfile.write ('.getValue()')
jfile.write (');\n')
jfile.write (' error = ARCONTROLLER_ERROR_ENUM.getFromValue(nativeError);\n')
jfile.write (' }\n')
jfile.write (' }\n')
jfile.write (' return error;\n')
jfile.write (' }\n')
jfile.write (' \n')
for arg in cmd.args:
jfile.write (' public ARCONTROLLER_ERROR_ENUM '+javaSetNAckFunction (cmd, arg)+' (' + xmlToJava (MODULE_ARCOMMANDS, feature, cmd, arg) + ' _' + arg.name+')\n')
jfile.write (' {\n')
jfile.write (' ARCONTROLLER_ERROR_ENUM error = ARCONTROLLER_ERROR_ENUM.ARCONTROLLER_OK;\n')
jfile.write (' synchronized (this)\n')
jfile.write (' {\n')
jfile.write (' if(initOk == true)\n')
jfile.write (' {\n')
if isinstance(arg.argType, ArEnum):
jfile.write (' int nativeError = '+nativeSetNAckFunction(cmd, arg)+' (jniFeature, _' + arg.name + '.getValue());\n')
else:
jfile.write (' int nativeError = '+nativeSetNAckFunction(cmd, arg)+' (jniFeature, _' + arg.name + ');\n')
jfile.write (' error = ARCONTROLLER_ERROR_ENUM.getFromValue(nativeError);\n')
jfile.write (' }\n')
jfile.write (' }\n')
jfile.write (' return error;\n')
jfile.write (' }\n')
jfile.write (' \n')
jfile.write ('\n')
jfile.write ('}\n')
jfile.write ('\n')
def generateFeatureControllersJNI (ctx, JNI_C_DIR):
#################################################
# Write Feature controller JNI c file #
#################################################
for feature in ctx.features:
javaClassName = 'ARFeature'+ ARCapitalize(get_ftr_old_name(feature))
jniClassName = MODULE_ARCONTROLLER + '_JNI_Feature'+ ARCapitalize(get_ftr_old_name(feature)) #ARTypeName (MODULE_FEATURE, 'JNI_'+get_ftr_old_name(feature), '')
className = ARTypeName (MODULE_FEATURE, get_ftr_old_name(feature), '')
classTag = 'ARCONTROLLER_JNIFEATURE'+get_ftr_old_name(feature).upper()+'_TAG'
cFileName = jniClassName + '.c'
filepath = JNI_C_DIR + cFileName
cFile = open (filepath, 'w')
cFile.write ('/**********************************************************\n')
cFile.write (' * AUTOGENERATED FILE *\n')
cFile.write (' * DO NOT MODIFY IT *\n')
cFile.write (' * *\n')
cFile.write (' * To add new commands : *\n')
cFile.write (' * - Modify ../Xml/commands.xml file *\n')
cFile.write (' * - Re-run generateFeatureControllers.py script *\n')
cFile.write (' * *\n')
cFile.write (' **********************************************************/\n')
cFile.write ('\n')
cFile.write ('/**\n')
cFile.write ('* @file '+jniClassName+'\n')
cFile.write ('* @brief '+ARTypeName (MODULE_FEATURE, get_ftr_old_name(feature), '')+' JNI feature '+get_ftr_old_name(feature)+' c file.\n')
cFile.write ('*/\n')
cFile.write ('\n')
cFile.write ('/*****************************************\n')
cFile.write (' *\n')
cFile.write (' * include file :\n')
cFile.write (' *\n')
cFile.write (' *****************************************/\n')
cFile.write ('\n')
cFile.write ('#include <jni.h>\n')
cFile.write ('#include <stdlib.h>\n')
cFile.write ('\n')
cFile.write ('#include <libARSAL/ARSAL_Print.h>\n')
cFile.write ('\n')
cFile.write ('#include <libARController/ARCONTROLLER_Error.h>\n')
cFile.write ('#include <libARController/ARCONTROLLER_Feature.h>\n')
cFile.write ('\n')
cFile.write ('/*****************************************\n')
cFile.write (' *\n')
cFile.write (' * define :\n')
cFile.write (' *\n')
cFile.write (' *****************************************/\n')
cFile.write ('\n')
cFile.write ('#define '+classTag+' "'+jniClassName+'"\n')
cFile.write ('\n')
cFile.write ('/*****************************************\n')
cFile.write (' *\n')
cFile.write (' * private header:\n')
cFile.write (' *\n')
cFile.write (' *****************************************/\n')
cFile.write ('\n')
cFile.write ('\n')
cFile.write ('/*****************************************\n')
cFile.write (' *\n')
cFile.write (' * implementation :\n')
cFile.write (' *\n')
cFile.write (' *****************************************/\n')
cFile.write ('\n')
for evt in feature.evts:
for arg in [arg1 for arg1 in evt.args if arg1.name != _LIST_FLAG]:
cFile.write ('JNIEXPORT jstring JNICALL\n')
cFile.write ('Java_com_parrot_arsdk_arcontroller_'+javaClassName+'_' + nativeGetNotificationVal(feature, evt, arg) + ' (JNIEnv *env , jclass class)\n')
cFile.write ('{\n')
cFile.write (' return (*env)->NewStringUTF(env, '+defineNotification(feature, evt, arg)+');\n')
cFile.write ('}\n')
cFile.write ('\n')
for cmd in feature.cmds:
cFile.write ('JNIEXPORT jint JNICALL\n')
cFile.write ('Java_com_parrot_arsdk_arcontroller_'+javaClassName+'_'+nativeSendingFunction(cmd)+' (JNIEnv *env, jobject thizz, jlong jFeature')
for arg in cmd.args:
if isinstance(arg.argType, ArMultiSetting):
for multiset_msg in arg.argType.msgs:
cFile.write (', jint '+multiset_msg.ftr.name+ multiset_msg.name+'IsSet')
for multiset_msg_arg in multiset_msg.args:
cFile.write (', ' + xmlToJni (multiset_msg.ftr, multiset_msg, multiset_msg_arg) + ' ' +multiset_msg.ftr.name+ multiset_msg.name+ multiset_msg_arg.name)
else:
cFile.write (', ' + xmlToJni (feature, cmd, arg) + ' _' + arg.name + '')
cFile.write (')\n')
cFile.write ('{\n')
cFile.write (' // local declarations\n')
cFile.write (' '+className+' *nativeFeature = ('+className+'*) (intptr_t) jFeature;\n')
cFile.write (' eARCONTROLLER_ERROR error = ARCONTROLLER_OK;\n')
hasArgString = False
for arg in cmd.args:
if (arg.argType == ArArgType.STRING):
hasArgString = True
cFile.write (' const char *native'+ARCapitalize(arg.name)+' = (*env)->GetStringUTFChars(env, _'+arg.name+', 0);\n')
elif isinstance(arg.argType, ArMultiSetting):
cFile.write (' ' + xmlToC (LIB_MODULE, feature, cmd, arg) + ' c_' + arg.name + ' = {\n')
for multiset_msg in arg.argType.msgs:
cFile.write (' .'+multiset_msg.name+'.isSet = '+multiset_msg.ftr.name+ multiset_msg.name+'IsSet,\n')
for multiset_msg_arg in multiset_msg.args:
cFile.write (' .'+multiset_msg.name+'.'+multiset_msg_arg.name+' = ' +multiset_msg.ftr.name+ multiset_msg.name+ multiset_msg_arg.name+',\n')
cFile.write ('};\n')
cFile.write (' \n')
cFile.write (' error = nativeFeature->'+sendingFunction(cmd)+' (nativeFeature')
for arg in cmd.args:
if (arg.argType == ArArgType.STRING):
cFile.write (', (char *)native'+ARCapitalize(arg.name))
elif isinstance(arg.argType, ArMultiSetting):
cFile.write (', &c_' + arg.name)
else:
cFile.write (', _' + arg.name)
cFile.write (');\n')
cFile.write ('\n')
if hasArgString:
cFile.write (' // cleanup\n')
for arg in cmd.args:
if (arg.argType == ArArgType.STRING):
cFile.write (' (*env)->ReleaseStringUTFChars(env, _'+arg.name+', native'+ARCapitalize(arg.name)+');\n')
if hasArgString:
cFile.write ('\n')
cFile.write (' return error;\n')
cFile.write ('}\n')
cFile.write ('\n')
if cmd.bufferType == ArCmdBufferType.NON_ACK:
cFile.write ('JNIEXPORT jint JNICALL\n')
cFile.write ('Java_com_parrot_arsdk_arcontroller_'+javaClassName+'_'+nativeSetNAckFunction(cmd)+' (JNIEnv *env, jobject thizz, jlong jFeature')
for arg in cmd.args:
cFile.write (', ' + xmlToJni (feature, cmd, arg) + ' _' + arg.name + '')
cFile.write (')\n')
cFile.write ('{\n')
cFile.write (' // local declarations\n')
cFile.write (' '+className+' *nativeFeature = ('+className+'*) (intptr_t) jFeature;\n')
cFile.write (' eARCONTROLLER_ERROR error = ARCONTROLLER_OK;\n')
hasArgString = False
for arg in cmd.args:
if (arg.argType == ArArgType.STRING):
hasArgString = True
cFile.write (' const char *native'+ARCapitalize(arg.name)+' = (*env)->GetStringUTFChars(env, _'+arg.name+', 0);\n')
cFile.write (' \n')
cFile.write (' error = nativeFeature->'+setNAckFunction(cmd)+' (nativeFeature')
for arg in cmd.args:
if (arg.argType == ArArgType.STRING):
cFile.write (', (char *)native'+ARCapitalize(arg.name))
else:
cFile.write (', _' + arg.name)
cFile.write (');\n')
cFile.write ('\n')
if hasArgString:
cFile.write (' // cleanup\n')
for arg in cmd.args:
if (arg.argType == ArArgType.STRING):
cFile.write (' (*env)->ReleaseStringUTFChars(env, _'+arg.name+', native'+ARCapitalize(arg.name)+');\n')
if hasArgString:
cFile.write ('\n')
cFile.write (' return error;\n')
cFile.write ('}\n')
cFile.write ('\n')
for arg in cmd.args:
cFile.write ('JNIEXPORT jint JNICALL\n')
cFile.write ('Java_com_parrot_arsdk_arcontroller_'+javaClassName+'_'+nativeSetNAckFunction(cmd, arg)+' (JNIEnv *env, jobject thizz, jlong jFeature, ' + xmlToJni (feature, cmd, arg) + ' _' + arg.name + ')\n')
cFile.write ('{\n')
cFile.write (' // local declarations\n')
cFile.write (' '+className+' *nativeFeature = ('+className+'*) (intptr_t) jFeature;\n')
cFile.write (' eARCONTROLLER_ERROR error = ARCONTROLLER_OK;\n')
if (arg.argType == ArArgType.STRING):
cFile.write (' const char *native'+ARCapitalize(arg.name)+' = (*env)->GetStringUTFChars(env, _'+arg.name+', 0);\n')
cFile.write (' \n')
cFile.write (' error = nativeFeature->'+setNAckFunction(cmd, arg)+' (nativeFeature')
if (arg.argType == ArArgType.STRING):
cFile.write (', (char *)native'+ARCapitalize(arg.name))
else:
cFile.write (', _' + arg.name)
cFile.write (');\n')
cFile.write ('\n')
if (arg.argType == ArArgType.STRING):
cFile.write (' // cleanup\n')
cFile.write (' (*env)->ReleaseStringUTFChars(env, _'+arg.name+', native'+ARCapitalize(arg.name)+');\n')
cFile.write ('\n')
cFile.write (' return error;\n')
cFile.write ('}\n')
cFile.write ('\n')
cFile.close ()
filepath = JNI_C_DIR + CTRL_FTR_JNI_C_NAME
cFile = open (filepath, 'w')
for feature in ctx.features:
jniClassName = MODULE_ARCONTROLLER + '_JNI_Feature'+ ARCapitalize(get_ftr_old_name(feature))
jniFtrFileName = jniClassName + '.c'
cFile.write ('#include "'+jniFtrFileName+'"\n')
cFile.close ()
def list_files_ftr_ctrls (ctx, SRC_DIR, INC_DIR):
''' Print features controllers generated files '''
print INC_DIR + CTRL_FTR_H_NAME
print SRC_DIR + CTRL_FTR_PRIV_H_NAME
print SRC_DIR + CTRL_FTR_C_NAME
def list_files_ftr_ctrls_jni (ctx, JNI_JAVA_DIR):
''' Print features controllers generated java files '''
# Print java feature class files
for feature in ctx.features:
print JNI_JAVA_DIR + 'ARFeature'+ ARCapitalize(get_ftr_old_name(feature)) +'.java'
def list_files_ftr_ctrls_jni_c (ctx, JNI_C_DIR):
''' Print features controllers generated JNI c files '''
# Print feature JNI c files
for feature in ctx.features:
print JNI_C_DIR + 'ARCONTROLLER_JNI_Feature'+ ARCapitalize(get_ftr_old_name(feature)) + '.c'
print JNI_C_DIR + CTRL_FTR_JNI_C_NAME
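# Hypothetical driver sketch: ctx and the output directories below are assumptions
# (in the real toolchain ctx comes from the libARCommands XML parser), shown only
# to illustrate how the entry points above fit together:
#
#   ctx = parse_xml_context ()                                  # hypothetical helper
#   generateFeatureControllersJava (ctx, 'gen/Java/')
#   generateFeatureControllersJNI (ctx, 'gen/JNI/c/')
#   list_files_ftr_ctrls (ctx, 'gen/Sources/', 'gen/Includes/')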
| [per-file quality-signal columns elided] |
5d6af3528d48083b4b2318b34fa2429cfe267e4d | 4,464 | py | Python | Python/zuma-game.py | sm2774us/leetcode_interview_prep_2021 | 33b41bea66c266b733372d9a8b9d2965cd88bf8c | ["Fair"] | null | null | null
# for this problem, the judge should either relax the time limit or
# state that a ball may only be inserted next to a ball of the same color
import collections
# Time: O((b+h) * h!*(b+h-1)!/(b-1)!)
# Space: O((b+h) * h!*(b+h-1)!/(b-1)!)
# brute force solution
class Solution_TLE_BUT_CORRECT(object):
def findMinStep(self, board, hand):
"""
:type board: str
:type hand: str
:rtype: int
"""
def shrink(s): # Time: O(n), Space: O(n)
stack = []
start = 0
for i in range(len(s)+1):
if i == len(s) or s[i] != s[start]:
if stack and stack[-1][0] == s[start]:
stack[-1][1] += i - start
if stack[-1][1] >= 3:
stack.pop()
elif s and i - start < 3:
stack.append([s[start], i - start])
start = i
result = []
for p in stack:
result += [p[0]] * p[1]
return result
def findMinStepHelper(board, hand, lookup):
if not board: return 0
if not hand: return float("inf")
if tuple(hand) in lookup[tuple(board)]: return lookup[tuple(board)][tuple(hand)]
result = float("inf")
for i in range(len(hand)):
for j in range(len(board)+1):
next_board = shrink(board[0:j] + hand[i:i+1] + board[j:])
next_hand = hand[0:i] + hand[i+1:]
result = min(result, findMinStepHelper(next_board, next_hand, lookup) + 1)
lookup[tuple(board)][tuple(hand)] = result
return result
lookup = collections.defaultdict(dict)
board, hand = list(board), list(hand)
result = findMinStepHelper(board, hand, lookup)
return -1 if result == float("inf") else result
# Time: O(b * b! * h!)
# Space: O(b * b! * h!)
# if a ball may only be inserted next to a ball of the same color,
# this solution applies
class Solution_WRONG_GREEDY_BUT_ACCEPT(object):
def findMinStep(self, board, hand):
"""
:type board: str
:type hand: str
:rtype: int
"""
def shrink(s): # Time: O(n), Space: O(n)
stack = []
start = 0
for i in range(len(s)+1):
if i == len(s) or s[i] != s[start]:
if stack and stack[-1][0] == s[start]:
stack[-1][1] += i - start
if stack[-1][1] >= 3:
stack.pop()
elif s and i - start < 3:
stack.append([s[start], i - start])
start = i
result = []
for p in stack:
result += [p[0]] * p[1]
return result
def find(board, c, j):
for i in range(j, len(board)):
if board[i] == c:
return i
return -1
def findMinStepHelper(board, hand, lookup):
if not board: return 0
if not hand: return float("inf")
if tuple(hand) in lookup[tuple(board)]: return lookup[tuple(board)][tuple(hand)]
result = float("inf")
for i in range(len(hand)):
j = 0
while j < len(board):
k = find(board, hand[i], j)
if k == -1:
break
if k < len(board) - 1 and board[k] == board[k+1]:
next_board = shrink(board[0:k] + board[k+2:])
next_hand = hand[0:i] + hand[i+1:]
result = min(result, findMinStepHelper(next_board, next_hand, lookup) + 1)
k += 1
elif i > 0 and hand[i] == hand[i-1]:
next_board = shrink(board[0:k] + board[k+1:])
next_hand = hand[0:i-1] + hand[i+1:]
result = min(result, findMinStepHelper(next_board, next_hand, lookup) + 2)
j = k+1
lookup[tuple(board)][tuple(hand)] = result
return result
lookup = collections.defaultdict(dict)
board, hand = list(board), list(hand)
hand.sort()
result = findMinStepHelper(board, hand, lookup)
return -1 if result == float("inf") else result
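# A small usage sketch (not part of the original solution): exercises the brute-force
# solver on the examples from the LeetCode problem statement.
if __name__ == '__main__':
    solver = Solution_TLE_BUT_CORRECT()
    print(solver.findMinStep("WRRBBW", "RB"))       # expected -1: the board cannot be cleared
    print(solver.findMinStep("WWRRBBWW", "WRBRW"))  # expected 2
    print(solver.findMinStep("G", "GGGGG"))         # expected 2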
| [per-file quality-signal columns elided] |
537c6b5f59b4d2730ce5cbe4ecb5e896ee6860fb | 733 | py | Python | opencolorio_config_aces/config/generation/__init__.py | AcademySoftwareFoundation/OpenColorIO-Config-ACES | 79e07061e28a81d1bb0cbfd6b5d5376a35025b58 | ["BSD-3-Clause"] | 52 | 2020-05-19T05:05:11.000Z | 2022-03-29T20:20:42.000Z | 41 | 2020-05-17T03:18:24.000Z | 2022-03-31T12:02:35.000Z | 12 | 2020-05-18T18:21:57.000Z | 2022-03-29T20:00:55.000Z
# SPDX-License-Identifier: BSD-3-Clause
# Copyright Contributors to the OpenColorIO Project.
from .common import (produce_transform, transform_factory,
group_transform_factory, colorspace_factory,
named_transform_factory, view_transform_factory,
look_factory, ConfigData, deserialize_config_data,
serialize_config_data, validate_config, generate_config)
__all__ = [
'produce_transform', 'transform_factory', 'group_transform_factory',
'colorspace_factory', 'named_transform_factory', 'view_transform_factory',
'look_factory', 'ConfigData', 'deserialize_config_data',
'serialize_config_data', 'validate_config', 'generate_config'
]
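# A minimal usage sketch (hypothetical caller): downstream code imports the
# re-exported names from this package rather than from .common directly, e.g.:
#
#   from opencolorio_config_aces.config.generation import ConfigData, generate_config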
| [per-file quality-signal columns elided] |
53808726dfc90f992557252cd6f255e138383223 | 5,130 | py | Python | useful_cmaps/cmaps/coco.py | qizhuli/useful-cmaps | 49c6b89cf4ca4f6b1094a0a8db8f8488e0fb8e7b | ["Apache-2.0"] | null | null | null
# flake8: noqa
COCO_CMAP = [
220, 20, 60,
119, 11, 32,
0, 0, 142,
0, 0, 230,
106, 0, 228,
0, 60, 100,
0, 80, 100,
0, 0, 70,
0, 0, 192,
250, 170, 30,
100, 170, 30,
220, 220, 0,
175, 116, 175,
250, 0, 30,
165, 42, 42,
255, 77, 255,
0, 226, 252,
182, 182, 255,
0, 82, 0,
120, 166, 157,
110, 76, 0,
174, 57, 255,
199, 100, 0,
72, 0, 118,
255, 179, 240,
0, 125, 92,
209, 0, 151,
188, 208, 182,
0, 220, 176,
255, 99, 164,
92, 0, 73,
133, 129, 255,
78, 180, 255,
0, 228, 0,
174, 255, 243,
45, 89, 255,
134, 134, 103,
145, 148, 174,
255, 208, 186,
197, 226, 255,
171, 134, 1,
109, 63, 54,
207, 138, 255,
151, 0, 95,
9, 80, 61,
84, 105, 51,
74, 65, 105,
166, 196, 102,
208, 195, 210,
255, 109, 65,
0, 143, 149,
179, 0, 194,
209, 99, 106,
5, 121, 0,
227, 255, 205,
147, 186, 208,
153, 69, 1,
3, 95, 161,
163, 255, 0,
119, 0, 170,
0, 182, 199,
0, 165, 120,
183, 130, 88,
95, 32, 0,
130, 114, 135,
110, 129, 133,
166, 74, 118,
219, 142, 185,
79, 210, 114,
178, 90, 62,
65, 70, 15,
127, 167, 115,
59, 105, 106,
142, 108, 45,
196, 172, 0,
95, 54, 80,
128, 76, 255,
201, 57, 1,
246, 0, 122,
191, 162, 208,
255, 255, 128,
147, 211, 203,
150, 100, 100,
168, 171, 172,
146, 112, 198,
210, 170, 100,
92, 136, 89,
218, 88, 184,
241, 129, 0,
217, 17, 255,
124, 74, 181,
70, 70, 70,
255, 228, 255,
154, 208, 0,
193, 0, 92,
76, 91, 113,
255, 180, 195,
106, 154, 176,
230, 150, 140,
60, 143, 255,
128, 64, 128,
92, 82, 55,
254, 212, 124,
73, 77, 174,
255, 160, 98,
255, 255, 255,
104, 84, 109,
169, 164, 131,
225, 199, 255,
137, 54, 74,
135, 158, 223,
7, 246, 231,
107, 255, 200,
58, 41, 149,
183, 121, 142,
255, 73, 97,
107, 142, 35,
190, 153, 153,
146, 139, 141,
70, 130, 180,
134, 199, 156,
209, 226, 140,
96, 36, 108,
96, 96, 96,
64, 170, 64,
152, 251, 152,
208, 229, 228,
206, 186, 171,
152, 161, 64,
116, 112, 0,
0, 114, 143,
102, 102, 156,
250, 141, 255,
]
COCO_LEGACY_CMAP = [
0, 0, 0,
220, 20, 60,
119, 11, 32,
0, 0, 142,
0, 0, 230,
106, 0, 228,
0, 60, 100,
0, 80, 100,
0, 0, 70,
0, 0, 192,
250, 170, 30,
100, 170, 30,
220, 220, 0,
175, 116, 175,
250, 0, 30,
165, 42, 42,
255, 77, 255,
0, 226, 252,
182, 182, 255,
0, 82, 0,
120, 166, 157,
110, 76, 0,
174, 57, 255,
199, 100, 0,
72, 0, 118,
255, 179, 240,
0, 125, 92,
209, 0, 151,
188, 208, 182,
0, 220, 176,
255, 99, 164,
92, 0, 73,
133, 129, 255,
78, 180, 255,
0, 228, 0,
174, 255, 243,
45, 89, 255,
134, 134, 103,
145, 148, 174,
255, 208, 186,
197, 226, 255,
171, 134, 1,
109, 63, 54,
207, 138, 255,
151, 0, 95,
9, 80, 61,
84, 105, 51,
74, 65, 105,
166, 196, 102,
208, 195, 210,
255, 109, 65,
0, 143, 149,
179, 0, 194,
209, 99, 106,
5, 121, 0,
227, 255, 205,
147, 186, 208,
153, 69, 1,
3, 95, 161,
163, 255, 0,
119, 0, 170,
0, 182, 199,
0, 165, 120,
183, 130, 88,
95, 32, 0,
130, 114, 135,
110, 129, 133,
166, 74, 118,
219, 142, 185,
79, 210, 114,
178, 90, 62,
65, 70, 15,
127, 167, 115,
59, 105, 106,
142, 108, 45,
196, 172, 0,
95, 54, 80,
128, 76, 255,
201, 57, 1,
246, 0, 122,
191, 162, 208,
255, 255, 128,
147, 211, 203,
150, 100, 100,
168, 171, 172,
146, 112, 198,
210, 170, 100,
92, 136, 89,
218, 88, 184,
241, 129, 0,
217, 17, 255,
124, 74, 181,
70, 70, 70,
255, 228, 255,
154, 208, 0,
193, 0, 92,
76, 91, 113,
255, 180, 195,
106, 154, 176,
230, 150, 140,
60, 143, 255,
128, 64, 128,
92, 82, 55,
254, 212, 124,
73, 77, 174,
255, 160, 98,
255, 255, 255,
104, 84, 109,
169, 164, 131,
225, 199, 255,
137, 54, 74,
135, 158, 223,
7, 246, 231,
107, 255, 200,
58, 41, 149,
183, 121, 142,
255, 73, 97,
107, 142, 35,
190, 153, 153,
146, 139, 141,
70, 130, 180,
134, 199, 156,
209, 226, 140,
96, 36, 108,
96, 96, 96,
64, 170, 64,
152, 251, 152,
208, 229, 228,
206, 186, 171,
152, 161, 64,
116, 112, 0,
0, 114, 143,
102, 102, 156,
250, 141, 255,
]
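# A minimal sketch (not part of the original module): the flat lists above store
# consecutive RGB triplets, so consumers typically regroup them into (r, g, b) tuples.
def _as_rgb_tuples(flat_cmap):
    # group every three consecutive values into one (r, g, b) tuple
    return [tuple(flat_cmap[i:i + 3]) for i in range(0, len(flat_cmap), 3)]

# e.g. _as_rgb_tuples(COCO_CMAP)[0] == (220, 20, 60)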
| [per-file quality-signal columns elided] |
538778fecc958d6462859e873eba01827bb4e1ff | 4,444 | py | Python | tests/test_spotify.py | chriswang030/SwagLyrics-For-Spotify | a60fafe1ebe7a7d228495eba4d71f3a3b155f947 | ["MIT"] | 1 | 2019-02-14T05:24:18.000Z | 2019-02-14T05:24:18.000Z
tests/test_spotify.py | addddd123/SwagLyrics-For-Spotify | 8c0a9a4989c976adb79ce34c795d4070ea2abac3 | ["MIT"] | null | null | null
"""
Contains unit tests for spotify.py for linux
"""
import unittest
from swaglyrics.spotify import get_info_linux, get_info_windows, get_info_mac, song, artist
from mock import mock, patch, Mock
import platform
class LinuxTests(unittest.TestCase):
"""
Unit tests for Linux
"""
def setUp(self):
pass
@patch('swaglyrics.spotify.get_info_linux')
def test_that_artist_function_calls_get_info(self, mock):
"""
test that test artist function calls get_info_linux function
"""
x = artist()
self.assertTrue(mock.called)
@patch('swaglyrics.spotify.get_info_linux')
def test_that_song_function_calls_get_info(self, mock):
"""
test that test song function calls get_info_linux function
"""
x = song()
self.assertTrue(mock.called)
@patch('swaglyrics.spotify.get_info_linux', side_effect=ValueError)
def test_that_artist_function_returns_None_when_error(self, mock):
"""
test that test artist function returns None when the get_info_linux function will return an error
"""
x = artist()
self.assertEqual(x, None)
@patch('swaglyrics.spotify.get_info_linux', side_effect=ValueError)
def test_that_song_function_returns_None_when_error(self, mock):
"""
test that test song function returns None when the get_info_linux function will return an error
"""
x = song()
self.assertEqual(x, None)
class WindowsTests(unittest.TestCase):
"""
Unit tests for Windows
"""
def setUp(self):
pass
if platform.system() == "Windows":
import win32gui
@mock.patch('win32gui.GetWindowText', return_value='Alan Walker - Darkside')
@mock.patch('win32gui.EnumWindows', return_value=None)
def test_get_info_windows(self, mock_win32gui_1, mock_win32gui_2):
"""
test that get_info_windows works
"""
x = get_info_windows()
self.assertEqual(x, ("Alan Walker", "Darkside"))
@mock.patch('win32gui.GetWindowText', return_value='Alan Walker')
@mock.patch('win32gui.EnumWindows', return_value=None)
def test_get_info_windows_error_handling(self, mock_win32gui_1, mock_win32gui_2):
"""
test that get_info_windows return None when it doesn't find
"""
x = get_info_windows()
self.assertEqual(x, None)
@patch('swaglyrics.spotify.get_info_windows')
def test_that_artist_function_calls_get_info(self, mock):
"""
test that test artist function calls get_info_windows function
"""
x = artist()
self.assertTrue(mock.called)
@patch('swaglyrics.spotify.get_info_windows')
def test_that_song_function_calls_get_info(self, mock):
"""
test that test song function calls get_info_windows function
"""
x = song()
self.assertTrue(mock.called)
@patch('swaglyrics.spotify.get_info_windows', side_effect=ValueError)
def test_that_artist_function_returns_None_when_error(self, mock):
"""
test that test artist function returns None when the get_info_windows function will return an error
"""
x = artist()
self.assertEqual(x, None)
@patch('swaglyrics.spotify.get_info_windows', side_effect=ValueError)
def test_that_song_function_returns_None_when_error(self, mock):
"""
test that test song function returns None when the get_info_windows function will return an error
"""
x = song()
self.assertEqual(x, None)
@mock.patch('platform.system', return_value='Darwin')
class DarwinTests(unittest.TestCase):
"""
Unit tests for macOS
"""
def setUp(self):
pass
@patch('swaglyrics.spotify.get_info_mac')
def test_that_artist_function_calls_get_info(self, mock, mock_os):
"""
test that test artist function calls get_info_mac function
"""
x = artist()
self.assertTrue(mock.called)
@patch('swaglyrics.spotify.get_info_mac')
def test_that_song_function_calls_get_info(self, mock, mock_os):
"""
test that test song function calls get_info_mac function
"""
x = song()
self.assertTrue(mock.called)
@patch('swaglyrics.spotify.get_info_mac', side_effect=ValueError)
def test_that_artist_function_returns_None_when_error(self, mock, mock_os):
"""
test that test artist function returns None when the get_info_mac function will return an error
"""
x = artist()
self.assertEqual(x, None)
@patch('swaglyrics.spotify.get_info_mac', side_effect=ValueError)
def test_that_song_function_returns_None_when_error(self, mock, mock_os):
"""
test that test song function returns None when the get_info_mac function will return an error
"""
x = song()
self.assertEqual(x, None)
if __name__ == '__main__':
unittest.main()
| [per-file quality-signal columns elided] |
99031d827730d1d15934967f1cb1ceea93895524 | 1,472 | py | Python | SnowDown/backend/migrations/0002_auto_20211106_2126.py | HACC2021/SnowDown | 25f261d01a9f251a9bbb6a7120f9059de4020346 | ["MIT"] | 1 | 2021-11-10T17:38:09.000Z | 2021-11-10T17:38:09.000Z
# Generated by Django 3.2.9 on 2021-11-07 07:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='group_incident_table',
name='Date',
field=models.CharField(default=None, max_length=100),
preserve_default=False,
),
migrations.AddField(
model_name='group_incident_table',
name='Time',
field=models.CharField(default=None, max_length=100),
preserve_default=False,
),
migrations.AddField(
model_name='group_incident_table',
name='dateTime',
field=models.DateTimeField(default=None),
preserve_default=False,
),
migrations.AddField(
model_name='incident_table',
name='Date',
field=models.CharField(default=None, max_length=100),
preserve_default=False,
),
migrations.AddField(
model_name='incident_table',
name='Time',
field=models.CharField(default=None, max_length=100),
preserve_default=False,
),
migrations.AddField(
model_name='incident_table',
name='dateTime',
field=models.DateTimeField(default=None),
preserve_default=False,
),
]
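# A hedged sketch (class and field names inferred from the operations above, not
# from the actual models.py) of the fields this migration adds to each table:
#
#   class Incident_Table(models.Model):
#       Date = models.CharField(max_length=100)
#       Time = models.CharField(max_length=100)
#       dateTime = models.DateTimeField()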
| [per-file quality-signal columns elided] |
99051e2a8e7a6da378ef45ea19e6a95d527733fb | 187,018 | py | Python | pyclient/confluo/rpc/rpc_service.py | louishust/confluo | 55377acf19bd468015fb2c98cad737b264346110 | ["Apache-2.0"] | null | null | null
#
# Autogenerated by Thrift Compiler (0.11.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
import logging
from .ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
all_structs = []
class Iface(object):
def register_handler(self):
"""
Management ops *
"""
pass
def deregister_handler(self):
pass
def create_atomic_multilog(self, name, schema, mode):
"""
Parameters:
- name
- schema
- mode
"""
pass
def get_atomic_multilog_info(self, name):
"""
Parameters:
- name
"""
pass
def remove_atomic_multilog(self, multilog_id):
"""
Parameters:
- multilog_id
"""
pass
def add_index(self, multilog_id, field_name, bucket_size):
"""
Parameters:
- multilog_id
- field_name
- bucket_size
"""
pass
def remove_index(self, multilog_id, field_name):
"""
Parameters:
- multilog_id
- field_name
"""
pass
def add_filter(self, multilog_id, filter_name, filter_expr):
"""
Parameters:
- multilog_id
- filter_name
- filter_expr
"""
pass
def remove_filter(self, multilog_id, filter_name):
"""
Parameters:
- multilog_id
- filter_name
"""
pass
def add_aggregate(self, mutlilog_id, aggregate_name, filter_name, aggregate_expr):
"""
Parameters:
- mutlilog_id
- aggregate_name
- filter_name
- aggregate_expr
"""
pass
def remove_aggregate(self, multilog_id, aggregate_name):
"""
Parameters:
- multilog_id
- aggregate_name
"""
pass
def add_trigger(self, multilog_id, trigger_name, trigger_expr):
"""
Parameters:
- multilog_id
- trigger_name
- trigger_expr
"""
pass
def remove_trigger(self, multilog_id, trigger_name):
"""
Parameters:
- multilog_id
- trigger_name
"""
pass
def append(self, multilog_id, data):
"""
Query ops *
Parameters:
- multilog_id
- data
"""
pass
def append_batch(self, multilog_id, batch):
"""
Parameters:
- multilog_id
- batch
"""
pass
def read(self, multilog_id, offset, nrecords):
"""
Parameters:
- multilog_id
- offset
- nrecords
"""
pass
def query_aggregate(self, multilog_id, aggregate_name, begin_ms, end_ms):
"""
Parameters:
- multilog_id
- aggregate_name
- begin_ms
- end_ms
"""
pass
def adhoc_aggregate(self, multilog_id, aggregate_expr, filter_expr):
"""
Parameters:
- multilog_id
- aggregate_expr
- filter_expr
"""
pass
def adhoc_filter(self, multilog_id, filter_expr):
"""
Parameters:
- multilog_id
- filter_expr
"""
pass
def predef_filter(self, multilog_id, filter_name, begin_ms, end_ms):
"""
Parameters:
- multilog_id
- filter_name
- begin_ms
- end_ms
"""
pass
def combined_filter(self, multilog_id, filter_name, filter_expr, begin_ms, end_ms):
"""
Parameters:
- multilog_id
- filter_name
- filter_expr
- begin_ms
- end_ms
"""
pass
def alerts_by_time(self, multilog_id, begin_ms, end_ms):
"""
Parameters:
- multilog_id
- begin_ms
- end_ms
"""
pass
def alerts_by_trigger_and_time(self, multilog_id, trigger_name, begin_ms, end_ms):
"""
Parameters:
- multilog_id
- trigger_name
- begin_ms
- end_ms
"""
pass
def get_more(self, multilog_id, desc):
"""
Parameters:
- multilog_id
- desc
"""
pass
def num_records(self, multilog_id):
"""
Parameters:
- multilog_id
"""
pass
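# A hedged sketch (not part of the Thrift-generated file): a server-side handler
# subclasses Iface and overrides the operations it serves; bodies here are assumptions.
class _ExampleHandlerSketch(Iface):
    def register_handler(self):
        logging.info('client registered')  # hypothetical bookkeeping only

    def num_records(self, multilog_id):
        return 0  # hypothetical: report an empty multilog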
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def register_handler(self):
"""
Management ops *
"""
self.send_register_handler()
self.recv_register_handler()
def send_register_handler(self):
self._oprot.writeMessageBegin('register_handler', TMessageType.CALL, self._seqid)
args = register_handler_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_register_handler(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = register_handler_result()
result.read(iprot)
iprot.readMessageEnd()
if result.ex is not None:
raise result.ex
return
def deregister_handler(self):
self.send_deregister_handler()
self.recv_deregister_handler()
def send_deregister_handler(self):
self._oprot.writeMessageBegin('deregister_handler', TMessageType.CALL, self._seqid)
args = deregister_handler_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_deregister_handler(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = deregister_handler_result()
result.read(iprot)
iprot.readMessageEnd()
if result.ex is not None:
raise result.ex
return
def create_atomic_multilog(self, name, schema, mode):
"""
Parameters:
- name
- schema
- mode
"""
self.send_create_atomic_multilog(name, schema, mode)
return self.recv_create_atomic_multilog()
def send_create_atomic_multilog(self, name, schema, mode):
self._oprot.writeMessageBegin('create_atomic_multilog', TMessageType.CALL, self._seqid)
args = create_atomic_multilog_args()
args.name = name
args.schema = schema
args.mode = mode
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_create_atomic_multilog(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = create_atomic_multilog_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ex is not None:
raise result.ex
raise TApplicationException(TApplicationException.MISSING_RESULT, "create_atomic_multilog failed: unknown result")
def get_atomic_multilog_info(self, name):
"""
Parameters:
- name
"""
self.send_get_atomic_multilog_info(name)
return self.recv_get_atomic_multilog_info()
def send_get_atomic_multilog_info(self, name):
self._oprot.writeMessageBegin('get_atomic_multilog_info', TMessageType.CALL, self._seqid)
args = get_atomic_multilog_info_args()
args.name = name
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_atomic_multilog_info(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = get_atomic_multilog_info_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_atomic_multilog_info failed: unknown result")
def remove_atomic_multilog(self, multilog_id):
"""
Parameters:
- multilog_id
"""
self.send_remove_atomic_multilog(multilog_id)
self.recv_remove_atomic_multilog()
def send_remove_atomic_multilog(self, multilog_id):
self._oprot.writeMessageBegin('remove_atomic_multilog', TMessageType.CALL, self._seqid)
args = remove_atomic_multilog_args()
args.multilog_id = multilog_id
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_remove_atomic_multilog(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = remove_atomic_multilog_result()
result.read(iprot)
iprot.readMessageEnd()
if result.ex is not None:
raise result.ex
return
def add_index(self, multilog_id, field_name, bucket_size):
"""
Parameters:
- multilog_id
- field_name
- bucket_size
"""
self.send_add_index(multilog_id, field_name, bucket_size)
self.recv_add_index()
def send_add_index(self, multilog_id, field_name, bucket_size):
self._oprot.writeMessageBegin('add_index', TMessageType.CALL, self._seqid)
args = add_index_args()
args.multilog_id = multilog_id
args.field_name = field_name
args.bucket_size = bucket_size
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_add_index(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = add_index_result()
result.read(iprot)
iprot.readMessageEnd()
if result.ex is not None:
raise result.ex
return
def remove_index(self, multilog_id, field_name):
"""
Parameters:
- multilog_id
- field_name
"""
self.send_remove_index(multilog_id, field_name)
self.recv_remove_index()
def send_remove_index(self, multilog_id, field_name):
self._oprot.writeMessageBegin('remove_index', TMessageType.CALL, self._seqid)
args = remove_index_args()
args.multilog_id = multilog_id
args.field_name = field_name
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_remove_index(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = remove_index_result()
result.read(iprot)
iprot.readMessageEnd()
if result.ex is not None:
raise result.ex
return
def add_filter(self, multilog_id, filter_name, filter_expr):
"""
Parameters:
- multilog_id
- filter_name
- filter_expr
"""
self.send_add_filter(multilog_id, filter_name, filter_expr)
self.recv_add_filter()
def send_add_filter(self, multilog_id, filter_name, filter_expr):
self._oprot.writeMessageBegin('add_filter', TMessageType.CALL, self._seqid)
args = add_filter_args()
args.multilog_id = multilog_id
args.filter_name = filter_name
args.filter_expr = filter_expr
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_add_filter(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = add_filter_result()
result.read(iprot)
iprot.readMessageEnd()
if result.ex is not None:
raise result.ex
return
def remove_filter(self, multilog_id, filter_name):
"""
Parameters:
- multilog_id
- filter_name
"""
self.send_remove_filter(multilog_id, filter_name)
self.recv_remove_filter()
def send_remove_filter(self, multilog_id, filter_name):
self._oprot.writeMessageBegin('remove_filter', TMessageType.CALL, self._seqid)
args = remove_filter_args()
args.multilog_id = multilog_id
args.filter_name = filter_name
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_remove_filter(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = remove_filter_result()
result.read(iprot)
iprot.readMessageEnd()
if result.ex is not None:
raise result.ex
return
def add_aggregate(self, multilog_id, aggregate_name, filter_name, aggregate_expr):
"""
Parameters:
- multilog_id
- aggregate_name
- filter_name
- aggregate_expr
"""
self.send_add_aggregate(multilog_id, aggregate_name, filter_name, aggregate_expr)
self.recv_add_aggregate()
def send_add_aggregate(self, multilog_id, aggregate_name, filter_name, aggregate_expr):
self._oprot.writeMessageBegin('add_aggregate', TMessageType.CALL, self._seqid)
args = add_aggregate_args()
# NOTE: the generated add_aggregate_args struct preserves the IDL's misspelled
# field name 'mutlilog_id', so the assignment below must keep that spelling.
args.mutlilog_id = multilog_id
args.aggregate_name = aggregate_name
args.filter_name = filter_name
args.aggregate_expr = aggregate_expr
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_add_aggregate(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = add_aggregate_result()
result.read(iprot)
iprot.readMessageEnd()
if result.ex is not None:
raise result.ex
return
def remove_aggregate(self, multilog_id, aggregate_name):
"""
Parameters:
- multilog_id
- aggregate_name
"""
self.send_remove_aggregate(multilog_id, aggregate_name)
self.recv_remove_aggregate()
def send_remove_aggregate(self, multilog_id, aggregate_name):
self._oprot.writeMessageBegin('remove_aggregate', TMessageType.CALL, self._seqid)
args = remove_aggregate_args()
args.multilog_id = multilog_id
args.aggregate_name = aggregate_name
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_remove_aggregate(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = remove_aggregate_result()
result.read(iprot)
iprot.readMessageEnd()
return
def add_trigger(self, multilog_id, trigger_name, trigger_expr):
"""
Parameters:
- multilog_id
- trigger_name
- trigger_expr
"""
self.send_add_trigger(multilog_id, trigger_name, trigger_expr)
self.recv_add_trigger()
def send_add_trigger(self, multilog_id, trigger_name, trigger_expr):
self._oprot.writeMessageBegin('add_trigger', TMessageType.CALL, self._seqid)
args = add_trigger_args()
args.multilog_id = multilog_id
args.trigger_name = trigger_name
args.trigger_expr = trigger_expr
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_add_trigger(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = add_trigger_result()
result.read(iprot)
iprot.readMessageEnd()
if result.ex is not None:
raise result.ex
return
def remove_trigger(self, multilog_id, trigger_name):
"""
Parameters:
- multilog_id
- trigger_name
"""
self.send_remove_trigger(multilog_id, trigger_name)
self.recv_remove_trigger()
def send_remove_trigger(self, multilog_id, trigger_name):
self._oprot.writeMessageBegin('remove_trigger', TMessageType.CALL, self._seqid)
args = remove_trigger_args()
args.multilog_id = multilog_id
args.trigger_name = trigger_name
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_remove_trigger(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = remove_trigger_result()
result.read(iprot)
iprot.readMessageEnd()
if result.ex is not None:
raise result.ex
return
def append(self, multilog_id, data):
"""
Query ops.
Parameters:
- multilog_id
- data
"""
self.send_append(multilog_id, data)
return self.recv_append()
def send_append(self, multilog_id, data):
self._oprot.writeMessageBegin('append', TMessageType.CALL, self._seqid)
args = append_args()
args.multilog_id = multilog_id
args.data = data
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_append(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = append_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "append failed: unknown result")
def append_batch(self, multilog_id, batch):
"""
Parameters:
- multilog_id
- batch
"""
self.send_append_batch(multilog_id, batch)
return self.recv_append_batch()
def send_append_batch(self, multilog_id, batch):
self._oprot.writeMessageBegin('append_batch', TMessageType.CALL, self._seqid)
args = append_batch_args()
args.multilog_id = multilog_id
args.batch = batch
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_append_batch(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = append_batch_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "append_batch failed: unknown result")
def read(self, multilog_id, offset, nrecords):
"""
Parameters:
- multilog_id
- offset
- nrecords
"""
self.send_read(multilog_id, offset, nrecords)
return self.recv_read()
def send_read(self, multilog_id, offset, nrecords):
self._oprot.writeMessageBegin('read', TMessageType.CALL, self._seqid)
args = read_args()
args.multilog_id = multilog_id
args.offset = offset
args.nrecords = nrecords
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_read(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = read_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "read failed: unknown result")
def query_aggregate(self, multilog_id, aggregate_name, begin_ms, end_ms):
"""
Parameters:
- multilog_id
- aggregate_name
- begin_ms
- end_ms
"""
self.send_query_aggregate(multilog_id, aggregate_name, begin_ms, end_ms)
return self.recv_query_aggregate()
def send_query_aggregate(self, multilog_id, aggregate_name, begin_ms, end_ms):
self._oprot.writeMessageBegin('query_aggregate', TMessageType.CALL, self._seqid)
args = query_aggregate_args()
args.multilog_id = multilog_id
args.aggregate_name = aggregate_name
args.begin_ms = begin_ms
args.end_ms = end_ms
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_query_aggregate(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = query_aggregate_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ex is not None:
raise result.ex
raise TApplicationException(TApplicationException.MISSING_RESULT, "query_aggregate failed: unknown result")
def adhoc_aggregate(self, multilog_id, aggregate_expr, filter_expr):
"""
Parameters:
- multilog_id
- aggregate_expr
- filter_expr
"""
self.send_adhoc_aggregate(multilog_id, aggregate_expr, filter_expr)
return self.recv_adhoc_aggregate()
def send_adhoc_aggregate(self, multilog_id, aggregate_expr, filter_expr):
self._oprot.writeMessageBegin('adhoc_aggregate', TMessageType.CALL, self._seqid)
args = adhoc_aggregate_args()
args.multilog_id = multilog_id
args.aggregate_expr = aggregate_expr
args.filter_expr = filter_expr
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_adhoc_aggregate(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = adhoc_aggregate_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ex is not None:
raise result.ex
raise TApplicationException(TApplicationException.MISSING_RESULT, "adhoc_aggregate failed: unknown result")
def adhoc_filter(self, multilog_id, filter_expr):
"""
Parameters:
- multilog_id
- filter_expr
"""
self.send_adhoc_filter(multilog_id, filter_expr)
return self.recv_adhoc_filter()
def send_adhoc_filter(self, multilog_id, filter_expr):
self._oprot.writeMessageBegin('adhoc_filter', TMessageType.CALL, self._seqid)
args = adhoc_filter_args()
args.multilog_id = multilog_id
args.filter_expr = filter_expr
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_adhoc_filter(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = adhoc_filter_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ex is not None:
raise result.ex
raise TApplicationException(TApplicationException.MISSING_RESULT, "adhoc_filter failed: unknown result")
def predef_filter(self, multilog_id, filter_name, begin_ms, end_ms):
"""
Parameters:
- multilog_id
- filter_name
- begin_ms
- end_ms
"""
self.send_predef_filter(multilog_id, filter_name, begin_ms, end_ms)
return self.recv_predef_filter()
def send_predef_filter(self, multilog_id, filter_name, begin_ms, end_ms):
self._oprot.writeMessageBegin('predef_filter', TMessageType.CALL, self._seqid)
args = predef_filter_args()
args.multilog_id = multilog_id
args.filter_name = filter_name
args.begin_ms = begin_ms
args.end_ms = end_ms
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_predef_filter(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = predef_filter_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ex is not None:
raise result.ex
raise TApplicationException(TApplicationException.MISSING_RESULT, "predef_filter failed: unknown result")
def combined_filter(self, multilog_id, filter_name, filter_expr, begin_ms, end_ms):
"""
Parameters:
- multilog_id
- filter_name
- filter_expr
- begin_ms
- end_ms
"""
self.send_combined_filter(multilog_id, filter_name, filter_expr, begin_ms, end_ms)
return self.recv_combined_filter()
def send_combined_filter(self, multilog_id, filter_name, filter_expr, begin_ms, end_ms):
self._oprot.writeMessageBegin('combined_filter', TMessageType.CALL, self._seqid)
args = combined_filter_args()
args.multilog_id = multilog_id
args.filter_name = filter_name
args.filter_expr = filter_expr
args.begin_ms = begin_ms
args.end_ms = end_ms
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_combined_filter(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = combined_filter_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ex is not None:
raise result.ex
raise TApplicationException(TApplicationException.MISSING_RESULT, "combined_filter failed: unknown result")
def alerts_by_time(self, multilog_id, begin_ms, end_ms):
"""
Parameters:
- multilog_id
- begin_ms
- end_ms
"""
self.send_alerts_by_time(multilog_id, begin_ms, end_ms)
return self.recv_alerts_by_time()
def send_alerts_by_time(self, multilog_id, begin_ms, end_ms):
self._oprot.writeMessageBegin('alerts_by_time', TMessageType.CALL, self._seqid)
args = alerts_by_time_args()
args.multilog_id = multilog_id
args.begin_ms = begin_ms
args.end_ms = end_ms
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_alerts_by_time(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = alerts_by_time_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ex is not None:
raise result.ex
raise TApplicationException(TApplicationException.MISSING_RESULT, "alerts_by_time failed: unknown result")
def alerts_by_trigger_and_time(self, multilog_id, trigger_name, begin_ms, end_ms):
"""
Parameters:
- multilog_id
- trigger_name
- begin_ms
- end_ms
"""
self.send_alerts_by_trigger_and_time(multilog_id, trigger_name, begin_ms, end_ms)
return self.recv_alerts_by_trigger_and_time()
def send_alerts_by_trigger_and_time(self, multilog_id, trigger_name, begin_ms, end_ms):
self._oprot.writeMessageBegin('alerts_by_trigger_and_time', TMessageType.CALL, self._seqid)
args = alerts_by_trigger_and_time_args()
args.multilog_id = multilog_id
args.trigger_name = trigger_name
args.begin_ms = begin_ms
args.end_ms = end_ms
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_alerts_by_trigger_and_time(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = alerts_by_trigger_and_time_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ex is not None:
raise result.ex
raise TApplicationException(TApplicationException.MISSING_RESULT, "alerts_by_trigger_and_time failed: unknown result")
def get_more(self, multilog_id, desc):
"""
Parameters:
- multilog_id
- desc
"""
self.send_get_more(multilog_id, desc)
return self.recv_get_more()
def send_get_more(self, multilog_id, desc):
self._oprot.writeMessageBegin('get_more', TMessageType.CALL, self._seqid)
args = get_more_args()
args.multilog_id = multilog_id
args.desc = desc
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_more(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = get_more_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ex is not None:
raise result.ex
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_more failed: unknown result")
def num_records(self, multilog_id):
"""
Parameters:
- multilog_id
"""
self.send_num_records(multilog_id)
return self.recv_num_records()
def send_num_records(self, multilog_id):
self._oprot.writeMessageBegin('num_records', TMessageType.CALL, self._seqid)
args = num_records_args()
args.multilog_id = multilog_id
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_num_records(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = num_records_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "num_records failed: unknown result")
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["register_handler"] = Processor.process_register_handler
self._processMap["deregister_handler"] = Processor.process_deregister_handler
self._processMap["create_atomic_multilog"] = Processor.process_create_atomic_multilog
self._processMap["get_atomic_multilog_info"] = Processor.process_get_atomic_multilog_info
self._processMap["remove_atomic_multilog"] = Processor.process_remove_atomic_multilog
self._processMap["add_index"] = Processor.process_add_index
self._processMap["remove_index"] = Processor.process_remove_index
self._processMap["add_filter"] = Processor.process_add_filter
self._processMap["remove_filter"] = Processor.process_remove_filter
self._processMap["add_aggregate"] = Processor.process_add_aggregate
self._processMap["remove_aggregate"] = Processor.process_remove_aggregate
self._processMap["add_trigger"] = Processor.process_add_trigger
self._processMap["remove_trigger"] = Processor.process_remove_trigger
self._processMap["append"] = Processor.process_append
self._processMap["append_batch"] = Processor.process_append_batch
self._processMap["read"] = Processor.process_read
self._processMap["query_aggregate"] = Processor.process_query_aggregate
self._processMap["adhoc_aggregate"] = Processor.process_adhoc_aggregate
self._processMap["adhoc_filter"] = Processor.process_adhoc_filter
self._processMap["predef_filter"] = Processor.process_predef_filter
self._processMap["combined_filter"] = Processor.process_combined_filter
self._processMap["alerts_by_time"] = Processor.process_alerts_by_time
self._processMap["alerts_by_trigger_and_time"] = Processor.process_alerts_by_trigger_and_time
self._processMap["get_more"] = Processor.process_get_more
self._processMap["num_records"] = Processor.process_num_records
def process(self, iprot, oprot):
(name, mtype, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_register_handler(self, seqid, iprot, oprot):
args = register_handler_args()
args.read(iprot)
iprot.readMessageEnd()
result = register_handler_result()
try:
self._handler.register_handler()
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except rpc_management_exception as ex:
msg_type = TMessageType.REPLY
result.ex = ex
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("register_handler", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_deregister_handler(self, seqid, iprot, oprot):
args = deregister_handler_args()
args.read(iprot)
iprot.readMessageEnd()
result = deregister_handler_result()
try:
self._handler.deregister_handler()
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except rpc_management_exception as ex:
msg_type = TMessageType.REPLY
result.ex = ex
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("deregister_handler", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_create_atomic_multilog(self, seqid, iprot, oprot):
args = create_atomic_multilog_args()
args.read(iprot)
iprot.readMessageEnd()
result = create_atomic_multilog_result()
try:
result.success = self._handler.create_atomic_multilog(args.name, args.schema, args.mode)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except rpc_management_exception as ex:
msg_type = TMessageType.REPLY
result.ex = ex
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("create_atomic_multilog", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_get_atomic_multilog_info(self, seqid, iprot, oprot):
args = get_atomic_multilog_info_args()
args.read(iprot)
iprot.readMessageEnd()
result = get_atomic_multilog_info_result()
try:
result.success = self._handler.get_atomic_multilog_info(args.name)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("get_atomic_multilog_info", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_remove_atomic_multilog(self, seqid, iprot, oprot):
args = remove_atomic_multilog_args()
args.read(iprot)
iprot.readMessageEnd()
result = remove_atomic_multilog_result()
try:
self._handler.remove_atomic_multilog(args.multilog_id)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except rpc_management_exception as ex:
msg_type = TMessageType.REPLY
result.ex = ex
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("remove_atomic_multilog", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_add_index(self, seqid, iprot, oprot):
args = add_index_args()
args.read(iprot)
iprot.readMessageEnd()
result = add_index_result()
try:
self._handler.add_index(args.multilog_id, args.field_name, args.bucket_size)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except rpc_management_exception as ex:
msg_type = TMessageType.REPLY
result.ex = ex
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("add_index", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_remove_index(self, seqid, iprot, oprot):
args = remove_index_args()
args.read(iprot)
iprot.readMessageEnd()
result = remove_index_result()
try:
self._handler.remove_index(args.multilog_id, args.field_name)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except rpc_management_exception as ex:
msg_type = TMessageType.REPLY
result.ex = ex
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("remove_index", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_add_filter(self, seqid, iprot, oprot):
args = add_filter_args()
args.read(iprot)
iprot.readMessageEnd()
result = add_filter_result()
try:
self._handler.add_filter(args.multilog_id, args.filter_name, args.filter_expr)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except rpc_management_exception as ex:
msg_type = TMessageType.REPLY
result.ex = ex
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("add_filter", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_remove_filter(self, seqid, iprot, oprot):
args = remove_filter_args()
args.read(iprot)
iprot.readMessageEnd()
result = remove_filter_result()
try:
self._handler.remove_filter(args.multilog_id, args.filter_name)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except rpc_management_exception as ex:
msg_type = TMessageType.REPLY
result.ex = ex
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("remove_filter", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_add_aggregate(self, seqid, iprot, oprot):
args = add_aggregate_args()
args.read(iprot)
iprot.readMessageEnd()
result = add_aggregate_result()
try:
# args.mutlilog_id keeps the misspelled field name from the generated struct.
self._handler.add_aggregate(args.mutlilog_id, args.aggregate_name, args.filter_name, args.aggregate_expr)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except rpc_management_exception as ex:
msg_type = TMessageType.REPLY
result.ex = ex
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("add_aggregate", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_remove_aggregate(self, seqid, iprot, oprot):
args = remove_aggregate_args()
args.read(iprot)
iprot.readMessageEnd()
result = remove_aggregate_result()
try:
self._handler.remove_aggregate(args.multilog_id, args.aggregate_name)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("remove_aggregate", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_add_trigger(self, seqid, iprot, oprot):
args = add_trigger_args()
args.read(iprot)
iprot.readMessageEnd()
result = add_trigger_result()
try:
self._handler.add_trigger(args.multilog_id, args.trigger_name, args.trigger_expr)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except rpc_management_exception as ex:
msg_type = TMessageType.REPLY
result.ex = ex
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("add_trigger", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_remove_trigger(self, seqid, iprot, oprot):
args = remove_trigger_args()
args.read(iprot)
iprot.readMessageEnd()
result = remove_trigger_result()
try:
self._handler.remove_trigger(args.multilog_id, args.trigger_name)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except rpc_management_exception as ex:
msg_type = TMessageType.REPLY
result.ex = ex
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("remove_trigger", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_append(self, seqid, iprot, oprot):
args = append_args()
args.read(iprot)
iprot.readMessageEnd()
result = append_result()
try:
result.success = self._handler.append(args.multilog_id, args.data)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("append", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_append_batch(self, seqid, iprot, oprot):
args = append_batch_args()
args.read(iprot)
iprot.readMessageEnd()
result = append_batch_result()
try:
result.success = self._handler.append_batch(args.multilog_id, args.batch)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("append_batch", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_read(self, seqid, iprot, oprot):
args = read_args()
args.read(iprot)
iprot.readMessageEnd()
result = read_result()
try:
result.success = self._handler.read(args.multilog_id, args.offset, args.nrecords)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("read", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_query_aggregate(self, seqid, iprot, oprot):
args = query_aggregate_args()
args.read(iprot)
iprot.readMessageEnd()
result = query_aggregate_result()
try:
result.success = self._handler.query_aggregate(args.multilog_id, args.aggregate_name, args.begin_ms, args.end_ms)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except rpc_invalid_operation as ex:
msg_type = TMessageType.REPLY
result.ex = ex
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("query_aggregate", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_adhoc_aggregate(self, seqid, iprot, oprot):
args = adhoc_aggregate_args()
args.read(iprot)
iprot.readMessageEnd()
result = adhoc_aggregate_result()
try:
result.success = self._handler.adhoc_aggregate(args.multilog_id, args.aggregate_expr, args.filter_expr)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except rpc_invalid_operation as ex:
msg_type = TMessageType.REPLY
result.ex = ex
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("adhoc_aggregate", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_adhoc_filter(self, seqid, iprot, oprot):
args = adhoc_filter_args()
args.read(iprot)
iprot.readMessageEnd()
result = adhoc_filter_result()
try:
result.success = self._handler.adhoc_filter(args.multilog_id, args.filter_expr)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except rpc_invalid_operation as ex:
msg_type = TMessageType.REPLY
result.ex = ex
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("adhoc_filter", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_predef_filter(self, seqid, iprot, oprot):
args = predef_filter_args()
args.read(iprot)
iprot.readMessageEnd()
result = predef_filter_result()
try:
result.success = self._handler.predef_filter(args.multilog_id, args.filter_name, args.begin_ms, args.end_ms)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except rpc_invalid_operation as ex:
msg_type = TMessageType.REPLY
result.ex = ex
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("predef_filter", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_combined_filter(self, seqid, iprot, oprot):
args = combined_filter_args()
args.read(iprot)
iprot.readMessageEnd()
result = combined_filter_result()
try:
result.success = self._handler.combined_filter(args.multilog_id, args.filter_name, args.filter_expr, args.begin_ms, args.end_ms)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except rpc_invalid_operation as ex:
msg_type = TMessageType.REPLY
result.ex = ex
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("combined_filter", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_alerts_by_time(self, seqid, iprot, oprot):
args = alerts_by_time_args()
args.read(iprot)
iprot.readMessageEnd()
result = alerts_by_time_result()
try:
result.success = self._handler.alerts_by_time(args.multilog_id, args.begin_ms, args.end_ms)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except rpc_invalid_operation as ex:
msg_type = TMessageType.REPLY
result.ex = ex
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("alerts_by_time", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_alerts_by_trigger_and_time(self, seqid, iprot, oprot):
args = alerts_by_trigger_and_time_args()
args.read(iprot)
iprot.readMessageEnd()
result = alerts_by_trigger_and_time_result()
try:
result.success = self._handler.alerts_by_trigger_and_time(args.multilog_id, args.trigger_name, args.begin_ms, args.end_ms)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except rpc_invalid_operation as ex:
msg_type = TMessageType.REPLY
result.ex = ex
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("alerts_by_trigger_and_time", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_get_more(self, seqid, iprot, oprot):
args = get_more_args()
args.read(iprot)
iprot.readMessageEnd()
result = get_more_result()
try:
result.success = self._handler.get_more(args.multilog_id, args.desc)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except rpc_invalid_operation as ex:
msg_type = TMessageType.REPLY
result.ex = ex
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("get_more", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_num_records(self, seqid, iprot, oprot):
args = num_records_args()
args.read(iprot)
iprot.readMessageEnd()
result = num_records_result()
try:
result.success = self._handler.num_records(args.multilog_id)
msg_type = TMessageType.REPLY
except TTransport.TTransportException:
raise
except TApplicationException as ex:
logging.exception('TApplication exception in handler')
msg_type = TMessageType.EXCEPTION
result = ex
except Exception:
logging.exception('Unexpected exception in handler')
msg_type = TMessageType.EXCEPTION
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("num_records", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
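# Usage sketch: serving a handler with Thrift's blocking TSimpleServer. The
# port is an assumption; threaded variants (TThreadedServer, TThreadPoolServer)
# accept the same Processor.
def _example_serve(handler, port=9090):
    from thrift.transport import TSocket, TTransport
    from thrift.protocol import TBinaryProtocol
    from thrift.server import TServer
    processor = Processor(handler)
    server = TServer.TSimpleServer(processor,
                                   TSocket.TServerSocket(port=port),
                                   TTransport.TBufferedTransportFactory(),
                                   TBinaryProtocol.TBinaryProtocolFactory())
    server.serve()  # blocks; each request is routed through Processor.process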
# HELPER FUNCTIONS AND STRUCTURES
class register_handler_args(object):
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('register_handler_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(register_handler_args)
register_handler_args.thrift_spec = (
)
class register_handler_result(object):
"""
Attributes:
- ex
"""
def __init__(self, ex=None,):
self.ex = ex
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.ex = rpc_management_exception()
self.ex.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('register_handler_result')
if self.ex is not None:
oprot.writeFieldBegin('ex', TType.STRUCT, 1)
self.ex.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(register_handler_result)
register_handler_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'ex', [rpc_management_exception, None], None, ), # 1
)
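# thrift_spec entries are indexed by field id: each tuple is
# (field id, TType, field name, type metadata, default value). Slot 0 is None
# above because register_handler_result's only field, 'ex', has id 1.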
class deregister_handler_args(object):
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('deregister_handler_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(deregister_handler_args)
deregister_handler_args.thrift_spec = (
)
class deregister_handler_result(object):
"""
Attributes:
- ex
"""
def __init__(self, ex=None,):
self.ex = ex
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.ex = rpc_management_exception()
self.ex.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('deregister_handler_result')
if self.ex is not None:
oprot.writeFieldBegin('ex', TType.STRUCT, 1)
self.ex.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(deregister_handler_result)
deregister_handler_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'ex', [rpc_management_exception, None], None, ), # 1
)
class create_atomic_multilog_args(object):
"""
Attributes:
- name
- schema
- mode
"""
def __init__(self, name=None, schema=None, mode=None,):
self.name = name
self.schema = schema
self.mode = mode
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.schema = []
(_etype17, _size14) = iprot.readListBegin()
for _i18 in range(_size14):
_elem19 = rpc_column()
_elem19.read(iprot)
self.schema.append(_elem19)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.mode = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('create_atomic_multilog_args')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name)
oprot.writeFieldEnd()
if self.schema is not None:
oprot.writeFieldBegin('schema', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.schema))
for iter20 in self.schema:
iter20.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.mode is not None:
oprot.writeFieldBegin('mode', TType.I32, 3)
oprot.writeI32(self.mode)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(create_atomic_multilog_args)
create_atomic_multilog_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'name', 'UTF8', None, ), # 1
(2, TType.LIST, 'schema', (TType.STRUCT, [rpc_column, None], False), None, ), # 2
(3, TType.I32, 'mode', None, None, ), # 3
)
class create_atomic_multilog_result(object):
"""
Attributes:
- success
- ex
"""
def __init__(self, success=None, ex=None,):
self.success = success
self.ex = ex
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I64:
self.success = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ex = rpc_management_exception()
self.ex.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('create_atomic_multilog_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I64, 0)
oprot.writeI64(self.success)
oprot.writeFieldEnd()
if self.ex is not None:
oprot.writeFieldBegin('ex', TType.STRUCT, 1)
self.ex.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(create_atomic_multilog_result)
create_atomic_multilog_result.thrift_spec = (
(0, TType.I64, 'success', None, None, ), # 0
(1, TType.STRUCT, 'ex', [rpc_management_exception, None], None, ), # 1
)
class get_atomic_multilog_info_args(object):
"""
Attributes:
- name
"""
def __init__(self, name=None,):
self.name = name
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('get_atomic_multilog_info_args')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(get_atomic_multilog_info_args)
get_atomic_multilog_info_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'name', 'UTF8', None, ), # 1
)
class get_atomic_multilog_info_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = rpc_atomic_multilog_info()
self.success.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('get_atomic_multilog_info_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(get_atomic_multilog_info_result)
get_atomic_multilog_info_result.thrift_spec = (
(0, TType.STRUCT, 'success', [rpc_atomic_multilog_info, None], None, ), # 0
)
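# Editorial note (not generated): when a result's success slot is itself a
# struct (here rpc_atomic_multilog_info), read() instantiates the nested type
# and delegates to its own read(), and write() delegates likewise, so nested
# structs compose without any special casing in this file.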
class remove_atomic_multilog_args(object):
"""
Attributes:
- multilog_id
"""
def __init__(self, multilog_id=None,):
self.multilog_id = multilog_id
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.multilog_id = iprot.readI64()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('remove_atomic_multilog_args')
if self.multilog_id is not None:
oprot.writeFieldBegin('multilog_id', TType.I64, 1)
oprot.writeI64(self.multilog_id)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(remove_atomic_multilog_args)
remove_atomic_multilog_args.thrift_spec = (
None, # 0
(1, TType.I64, 'multilog_id', None, None, ), # 1
)
class remove_atomic_multilog_result(object):
"""
Attributes:
- ex
"""
def __init__(self, ex=None,):
self.ex = ex
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.ex = rpc_management_exception()
self.ex.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('remove_atomic_multilog_result')
if self.ex is not None:
oprot.writeFieldBegin('ex', TType.STRUCT, 1)
self.ex.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(remove_atomic_multilog_result)
remove_atomic_multilog_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'ex', [rpc_management_exception, None], None, ), # 1
)
class add_index_args(object):
"""
Attributes:
- multilog_id
- field_name
- bucket_size
"""
def __init__(self, multilog_id=None, field_name=None, bucket_size=None,):
self.multilog_id = multilog_id
self.field_name = field_name
self.bucket_size = bucket_size
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.multilog_id = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.field_name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.DOUBLE:
self.bucket_size = iprot.readDouble()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('add_index_args')
if self.multilog_id is not None:
oprot.writeFieldBegin('multilog_id', TType.I64, 1)
oprot.writeI64(self.multilog_id)
oprot.writeFieldEnd()
if self.field_name is not None:
oprot.writeFieldBegin('field_name', TType.STRING, 2)
oprot.writeString(self.field_name.encode('utf-8') if sys.version_info[0] == 2 else self.field_name)
oprot.writeFieldEnd()
if self.bucket_size is not None:
oprot.writeFieldBegin('bucket_size', TType.DOUBLE, 3)
oprot.writeDouble(self.bucket_size)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(add_index_args)
add_index_args.thrift_spec = (
None, # 0
(1, TType.I64, 'multilog_id', None, None, ), # 1
(2, TType.STRING, 'field_name', 'UTF8', None, ), # 2
(3, TType.DOUBLE, 'bucket_size', None, None, ), # 3
)
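# Editorial note (not generated): args structs are plain keyword-constructed
# containers; unset fields stay None and are simply skipped by write(). A
# hedged sketch (the field values are made up):
def _example_add_index_args():
    args = add_index_args(multilog_id=1, field_name='temperature', bucket_size=0.1)
    return repr(args)  # e.g. "add_index_args(multilog_id=1, field_name='temperature', bucket_size=0.1)"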
class add_index_result(object):
"""
Attributes:
- ex
"""
def __init__(self, ex=None,):
self.ex = ex
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.ex = rpc_management_exception()
self.ex.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('add_index_result')
if self.ex is not None:
oprot.writeFieldBegin('ex', TType.STRUCT, 1)
self.ex.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(add_index_result)
add_index_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'ex', [rpc_management_exception, None], None, ), # 1
)
class remove_index_args(object):
"""
Attributes:
- multilog_id
- field_name
"""
def __init__(self, multilog_id=None, field_name=None,):
self.multilog_id = multilog_id
self.field_name = field_name
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.multilog_id = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.field_name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('remove_index_args')
if self.multilog_id is not None:
oprot.writeFieldBegin('multilog_id', TType.I64, 1)
oprot.writeI64(self.multilog_id)
oprot.writeFieldEnd()
if self.field_name is not None:
oprot.writeFieldBegin('field_name', TType.STRING, 2)
oprot.writeString(self.field_name.encode('utf-8') if sys.version_info[0] == 2 else self.field_name)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(remove_index_args)
remove_index_args.thrift_spec = (
None, # 0
(1, TType.I64, 'multilog_id', None, None, ), # 1
(2, TType.STRING, 'field_name', 'UTF8', None, ), # 2
)
class remove_index_result(object):
"""
Attributes:
- ex
"""
def __init__(self, ex=None,):
self.ex = ex
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.ex = rpc_management_exception()
self.ex.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('remove_index_result')
if self.ex is not None:
oprot.writeFieldBegin('ex', TType.STRUCT, 1)
self.ex.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(remove_index_result)
remove_index_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'ex', [rpc_management_exception, None], None, ), # 1
)
class add_filter_args(object):
"""
Attributes:
- multilog_id
- filter_name
- filter_expr
"""
def __init__(self, multilog_id=None, filter_name=None, filter_expr=None,):
self.multilog_id = multilog_id
self.filter_name = filter_name
self.filter_expr = filter_expr
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.multilog_id = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.filter_name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.filter_expr = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('add_filter_args')
if self.multilog_id is not None:
oprot.writeFieldBegin('multilog_id', TType.I64, 1)
oprot.writeI64(self.multilog_id)
oprot.writeFieldEnd()
if self.filter_name is not None:
oprot.writeFieldBegin('filter_name', TType.STRING, 2)
oprot.writeString(self.filter_name.encode('utf-8') if sys.version_info[0] == 2 else self.filter_name)
oprot.writeFieldEnd()
if self.filter_expr is not None:
oprot.writeFieldBegin('filter_expr', TType.STRING, 3)
oprot.writeString(self.filter_expr.encode('utf-8') if sys.version_info[0] == 2 else self.filter_expr)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(add_filter_args)
add_filter_args.thrift_spec = (
None, # 0
(1, TType.I64, 'multilog_id', None, None, ), # 1
(2, TType.STRING, 'filter_name', 'UTF8', None, ), # 2
(3, TType.STRING, 'filter_expr', 'UTF8', None, ), # 3
)
class add_filter_result(object):
"""
Attributes:
- ex
"""
def __init__(self, ex=None,):
self.ex = ex
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.ex = rpc_management_exception()
self.ex.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('add_filter_result')
if self.ex is not None:
oprot.writeFieldBegin('ex', TType.STRUCT, 1)
self.ex.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(add_filter_result)
add_filter_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'ex', [rpc_management_exception, None], None, ), # 1
)
class remove_filter_args(object):
"""
Attributes:
- multilog_id
- filter_name
"""
def __init__(self, multilog_id=None, filter_name=None,):
self.multilog_id = multilog_id
self.filter_name = filter_name
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.multilog_id = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.filter_name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('remove_filter_args')
if self.multilog_id is not None:
oprot.writeFieldBegin('multilog_id', TType.I64, 1)
oprot.writeI64(self.multilog_id)
oprot.writeFieldEnd()
if self.filter_name is not None:
oprot.writeFieldBegin('filter_name', TType.STRING, 2)
oprot.writeString(self.filter_name.encode('utf-8') if sys.version_info[0] == 2 else self.filter_name)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(remove_filter_args)
remove_filter_args.thrift_spec = (
None, # 0
(1, TType.I64, 'multilog_id', None, None, ), # 1
(2, TType.STRING, 'filter_name', 'UTF8', None, ), # 2
)
class remove_filter_result(object):
"""
Attributes:
- ex
"""
def __init__(self, ex=None,):
self.ex = ex
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.ex = rpc_management_exception()
self.ex.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('remove_filter_result')
if self.ex is not None:
oprot.writeFieldBegin('ex', TType.STRUCT, 1)
self.ex.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(remove_filter_result)
remove_filter_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'ex', [rpc_management_exception, None], None, ), # 1
)
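# Editorial note (not generated): the first field of add_aggregate_args below
# is spelled 'mutlilog_id' (sic) rather than 'multilog_id'. The misspelling is
# preserved because the client stubs generated from the same IDL refer to the
# attribute by that exact name; renaming it only in this struct would break
# them.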
class add_aggregate_args(object):
"""
Attributes:
- mutlilog_id
- aggregate_name
- filter_name
- aggregate_expr
"""
def __init__(self, mutlilog_id=None, aggregate_name=None, filter_name=None, aggregate_expr=None,):
self.mutlilog_id = mutlilog_id
self.aggregate_name = aggregate_name
self.filter_name = filter_name
self.aggregate_expr = aggregate_expr
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.mutlilog_id = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.aggregate_name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.filter_name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.aggregate_expr = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('add_aggregate_args')
if self.mutlilog_id is not None:
oprot.writeFieldBegin('mutlilog_id', TType.I64, 1)
oprot.writeI64(self.mutlilog_id)
oprot.writeFieldEnd()
if self.aggregate_name is not None:
oprot.writeFieldBegin('aggregate_name', TType.STRING, 2)
oprot.writeString(self.aggregate_name.encode('utf-8') if sys.version_info[0] == 2 else self.aggregate_name)
oprot.writeFieldEnd()
if self.filter_name is not None:
oprot.writeFieldBegin('filter_name', TType.STRING, 3)
oprot.writeString(self.filter_name.encode('utf-8') if sys.version_info[0] == 2 else self.filter_name)
oprot.writeFieldEnd()
if self.aggregate_expr is not None:
oprot.writeFieldBegin('aggregate_expr', TType.STRING, 4)
oprot.writeString(self.aggregate_expr.encode('utf-8') if sys.version_info[0] == 2 else self.aggregate_expr)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(add_aggregate_args)
add_aggregate_args.thrift_spec = (
None, # 0
(1, TType.I64, 'mutlilog_id', None, None, ), # 1
(2, TType.STRING, 'aggregate_name', 'UTF8', None, ), # 2
(3, TType.STRING, 'filter_name', 'UTF8', None, ), # 3
(4, TType.STRING, 'aggregate_expr', 'UTF8', None, ), # 4
)
class add_aggregate_result(object):
"""
Attributes:
- ex
"""
def __init__(self, ex=None,):
self.ex = ex
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.ex = rpc_management_exception()
self.ex.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('add_aggregate_result')
if self.ex is not None:
oprot.writeFieldBegin('ex', TType.STRUCT, 1)
self.ex.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(add_aggregate_result)
add_aggregate_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'ex', [rpc_management_exception, None], None, ), # 1
)
class remove_aggregate_args(object):
"""
Attributes:
- multilog_id
- aggregate_name
"""
def __init__(self, multilog_id=None, aggregate_name=None,):
self.multilog_id = multilog_id
self.aggregate_name = aggregate_name
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.multilog_id = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.aggregate_name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('remove_aggregate_args')
if self.multilog_id is not None:
oprot.writeFieldBegin('multilog_id', TType.I64, 1)
oprot.writeI64(self.multilog_id)
oprot.writeFieldEnd()
if self.aggregate_name is not None:
oprot.writeFieldBegin('aggregate_name', TType.STRING, 2)
oprot.writeString(self.aggregate_name.encode('utf-8') if sys.version_info[0] == 2 else self.aggregate_name)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(remove_aggregate_args)
remove_aggregate_args.thrift_spec = (
None, # 0
(1, TType.I64, 'multilog_id', None, None, ), # 1
(2, TType.STRING, 'aggregate_name', 'UTF8', None, ), # 2
)
class remove_aggregate_result(object):
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('remove_aggregate_result')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(remove_aggregate_result)
remove_aggregate_result.thrift_spec = (
)
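# Editorial note (not generated): remove_aggregate_result declares no fields at
# all (a void return with no exceptions in this spec), so its thrift_spec is
# the empty tuple and its read() loop skips every incoming field until STOP.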
class add_trigger_args(object):
"""
Attributes:
- multilog_id
- trigger_name
- trigger_expr
"""
def __init__(self, multilog_id=None, trigger_name=None, trigger_expr=None,):
self.multilog_id = multilog_id
self.trigger_name = trigger_name
self.trigger_expr = trigger_expr
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.multilog_id = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.trigger_name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.trigger_expr = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('add_trigger_args')
if self.multilog_id is not None:
oprot.writeFieldBegin('multilog_id', TType.I64, 1)
oprot.writeI64(self.multilog_id)
oprot.writeFieldEnd()
if self.trigger_name is not None:
oprot.writeFieldBegin('trigger_name', TType.STRING, 2)
oprot.writeString(self.trigger_name.encode('utf-8') if sys.version_info[0] == 2 else self.trigger_name)
oprot.writeFieldEnd()
if self.trigger_expr is not None:
oprot.writeFieldBegin('trigger_expr', TType.STRING, 3)
oprot.writeString(self.trigger_expr.encode('utf-8') if sys.version_info[0] == 2 else self.trigger_expr)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(add_trigger_args)
add_trigger_args.thrift_spec = (
None, # 0
(1, TType.I64, 'multilog_id', None, None, ), # 1
(2, TType.STRING, 'trigger_name', 'UTF8', None, ), # 2
(3, TType.STRING, 'trigger_expr', 'UTF8', None, ), # 3
)
class add_trigger_result(object):
"""
Attributes:
- ex
"""
def __init__(self, ex=None,):
self.ex = ex
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.ex = rpc_management_exception()
self.ex.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('add_trigger_result')
if self.ex is not None:
oprot.writeFieldBegin('ex', TType.STRUCT, 1)
self.ex.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(add_trigger_result)
add_trigger_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'ex', [rpc_management_exception, None], None, ), # 1
)
class remove_trigger_args(object):
"""
Attributes:
- multilog_id
- trigger_name
"""
def __init__(self, multilog_id=None, trigger_name=None,):
self.multilog_id = multilog_id
self.trigger_name = trigger_name
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.multilog_id = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.trigger_name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('remove_trigger_args')
if self.multilog_id is not None:
oprot.writeFieldBegin('multilog_id', TType.I64, 1)
oprot.writeI64(self.multilog_id)
oprot.writeFieldEnd()
if self.trigger_name is not None:
oprot.writeFieldBegin('trigger_name', TType.STRING, 2)
oprot.writeString(self.trigger_name.encode('utf-8') if sys.version_info[0] == 2 else self.trigger_name)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(remove_trigger_args)
remove_trigger_args.thrift_spec = (
None, # 0
(1, TType.I64, 'multilog_id', None, None, ), # 1
(2, TType.STRING, 'trigger_name', 'UTF8', None, ), # 2
)
class remove_trigger_result(object):
"""
Attributes:
- ex
"""
def __init__(self, ex=None,):
self.ex = ex
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.ex = rpc_management_exception()
self.ex.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('remove_trigger_result')
if self.ex is not None:
oprot.writeFieldBegin('ex', TType.STRUCT, 1)
self.ex.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(remove_trigger_result)
remove_trigger_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'ex', [rpc_management_exception, None], None, ), # 1
)
class append_args(object):
"""
Attributes:
- multilog_id
- data
"""
def __init__(self, multilog_id=None, data=None,):
self.multilog_id = multilog_id
self.data = data
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.multilog_id = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.data = iprot.readBinary()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('append_args')
if self.multilog_id is not None:
oprot.writeFieldBegin('multilog_id', TType.I64, 1)
oprot.writeI64(self.multilog_id)
oprot.writeFieldEnd()
if self.data is not None:
oprot.writeFieldBegin('data', TType.STRING, 2)
oprot.writeBinary(self.data)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(append_args)
append_args.thrift_spec = (
None, # 0
(1, TType.I64, 'multilog_id', None, None, ), # 1
(2, TType.STRING, 'data', 'BINARY', None, ), # 2
)
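# Editorial note (not generated): 'data' is declared BINARY rather than UTF8,
# so it travels through readBinary()/writeBinary() as raw bytes with no
# encode/decode step. A hedged sketch (the payload is made up):
def _example_append_args():
    return append_args(multilog_id=7, data=b'\x00\x01\x02\x03')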
class append_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I64:
self.success = iprot.readI64()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('append_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I64, 0)
oprot.writeI64(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(append_result)
append_result.thrift_spec = (
(0, TType.I64, 'success', None, None, ), # 0
)
class append_batch_args(object):
"""
Attributes:
- multilog_id
- batch
"""
def __init__(self, multilog_id=None, batch=None,):
self.multilog_id = multilog_id
self.batch = batch
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.multilog_id = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.batch = rpc_record_batch()
self.batch.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('append_batch_args')
if self.multilog_id is not None:
oprot.writeFieldBegin('multilog_id', TType.I64, 1)
oprot.writeI64(self.multilog_id)
oprot.writeFieldEnd()
if self.batch is not None:
oprot.writeFieldBegin('batch', TType.STRUCT, 2)
self.batch.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(append_batch_args)
append_batch_args.thrift_spec = (
None, # 0
(1, TType.I64, 'multilog_id', None, None, ), # 1
(2, TType.STRUCT, 'batch', [rpc_record_batch, None], None, ), # 2
)
class append_batch_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I64:
self.success = iprot.readI64()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('append_batch_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I64, 0)
oprot.writeI64(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(append_batch_result)
append_batch_result.thrift_spec = (
(0, TType.I64, 'success', None, None, ), # 0
)
class read_args(object):
"""
Attributes:
- multilog_id
- offset
- nrecords
"""
def __init__(self, multilog_id=None, offset=None, nrecords=None,):
self.multilog_id = multilog_id
self.offset = offset
self.nrecords = nrecords
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.multilog_id = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I64:
self.offset = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.nrecords = iprot.readI64()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('read_args')
if self.multilog_id is not None:
oprot.writeFieldBegin('multilog_id', TType.I64, 1)
oprot.writeI64(self.multilog_id)
oprot.writeFieldEnd()
if self.offset is not None:
oprot.writeFieldBegin('offset', TType.I64, 2)
oprot.writeI64(self.offset)
oprot.writeFieldEnd()
if self.nrecords is not None:
oprot.writeFieldBegin('nrecords', TType.I64, 3)
oprot.writeI64(self.nrecords)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(read_args)
read_args.thrift_spec = (
None, # 0
(1, TType.I64, 'multilog_id', None, None, ), # 1
(2, TType.I64, 'offset', None, None, ), # 2
(3, TType.I64, 'nrecords', None, None, ), # 3
)
class read_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readBinary()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('read_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeBinary(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(read_result)
read_result.thrift_spec = (
(0, TType.STRING, 'success', 'BINARY', None, ), # 0
)
class query_aggregate_args(object):
"""
Attributes:
- multilog_id
- aggregate_name
- begin_ms
- end_ms
"""
def __init__(self, multilog_id=None, aggregate_name=None, begin_ms=None, end_ms=None,):
self.multilog_id = multilog_id
self.aggregate_name = aggregate_name
self.begin_ms = begin_ms
self.end_ms = end_ms
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.multilog_id = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.aggregate_name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.begin_ms = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.end_ms = iprot.readI64()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('query_aggregate_args')
if self.multilog_id is not None:
oprot.writeFieldBegin('multilog_id', TType.I64, 1)
oprot.writeI64(self.multilog_id)
oprot.writeFieldEnd()
if self.aggregate_name is not None:
oprot.writeFieldBegin('aggregate_name', TType.STRING, 2)
oprot.writeString(self.aggregate_name.encode('utf-8') if sys.version_info[0] == 2 else self.aggregate_name)
oprot.writeFieldEnd()
if self.begin_ms is not None:
oprot.writeFieldBegin('begin_ms', TType.I64, 3)
oprot.writeI64(self.begin_ms)
oprot.writeFieldEnd()
if self.end_ms is not None:
oprot.writeFieldBegin('end_ms', TType.I64, 4)
oprot.writeI64(self.end_ms)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(query_aggregate_args)
query_aggregate_args.thrift_spec = (
None, # 0
(1, TType.I64, 'multilog_id', None, None, ), # 1
(2, TType.STRING, 'aggregate_name', 'UTF8', None, ), # 2
(3, TType.I64, 'begin_ms', None, None, ), # 3
(4, TType.I64, 'end_ms', None, None, ), # 4
)
class query_aggregate_result(object):
"""
Attributes:
- success
- ex
"""
def __init__(self, success=None, ex=None,):
self.success = success
self.ex = ex
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ex = rpc_invalid_operation()
self.ex.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('query_aggregate_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success.encode('utf-8') if sys.version_info[0] == 2 else self.success)
oprot.writeFieldEnd()
if self.ex is not None:
oprot.writeFieldBegin('ex', TType.STRUCT, 1)
self.ex.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(query_aggregate_result)
query_aggregate_result.thrift_spec = (
(0, TType.STRING, 'success', 'UTF8', None, ), # 0
(1, TType.STRUCT, 'ex', [rpc_invalid_operation, None], None, ), # 1
)
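# Editorial note (not generated): from here on the result structs carry both a
# success slot and a declared exception 'ex'; the server sets exactly one of
# the two. A hedged sketch of the unpacking convention used by generated
# client stubs:
def _example_unpack_result(result):
    if result.ex is not None:
        raise result.ex  # e.g. rpc_invalid_operation re-raised on the client side
    return result.success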
class adhoc_aggregate_args(object):
"""
Attributes:
- multilog_id
- aggregate_expr
- filter_expr
"""
def __init__(self, multilog_id=None, aggregate_expr=None, filter_expr=None,):
self.multilog_id = multilog_id
self.aggregate_expr = aggregate_expr
self.filter_expr = filter_expr
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.multilog_id = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.aggregate_expr = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.filter_expr = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('adhoc_aggregate_args')
if self.multilog_id is not None:
oprot.writeFieldBegin('multilog_id', TType.I64, 1)
oprot.writeI64(self.multilog_id)
oprot.writeFieldEnd()
if self.aggregate_expr is not None:
oprot.writeFieldBegin('aggregate_expr', TType.STRING, 2)
oprot.writeString(self.aggregate_expr.encode('utf-8') if sys.version_info[0] == 2 else self.aggregate_expr)
oprot.writeFieldEnd()
if self.filter_expr is not None:
oprot.writeFieldBegin('filter_expr', TType.STRING, 3)
oprot.writeString(self.filter_expr.encode('utf-8') if sys.version_info[0] == 2 else self.filter_expr)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(adhoc_aggregate_args)
adhoc_aggregate_args.thrift_spec = (
None, # 0
(1, TType.I64, 'multilog_id', None, None, ), # 1
(2, TType.STRING, 'aggregate_expr', 'UTF8', None, ), # 2
(3, TType.STRING, 'filter_expr', 'UTF8', None, ), # 3
)
class adhoc_aggregate_result(object):
"""
Attributes:
- success
- ex
"""
def __init__(self, success=None, ex=None,):
self.success = success
self.ex = ex
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ex = rpc_invalid_operation()
self.ex.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('adhoc_aggregate_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success.encode('utf-8') if sys.version_info[0] == 2 else self.success)
oprot.writeFieldEnd()
if self.ex is not None:
oprot.writeFieldBegin('ex', TType.STRUCT, 1)
self.ex.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(adhoc_aggregate_result)
adhoc_aggregate_result.thrift_spec = (
(0, TType.STRING, 'success', 'UTF8', None, ), # 0
(1, TType.STRUCT, 'ex', [rpc_invalid_operation, None], None, ), # 1
)
class adhoc_filter_args(object):
"""
Attributes:
- multilog_id
- filter_expr
"""
def __init__(self, multilog_id=None, filter_expr=None,):
self.multilog_id = multilog_id
self.filter_expr = filter_expr
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.multilog_id = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.filter_expr = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('adhoc_filter_args')
if self.multilog_id is not None:
oprot.writeFieldBegin('multilog_id', TType.I64, 1)
oprot.writeI64(self.multilog_id)
oprot.writeFieldEnd()
if self.filter_expr is not None:
oprot.writeFieldBegin('filter_expr', TType.STRING, 2)
oprot.writeString(self.filter_expr.encode('utf-8') if sys.version_info[0] == 2 else self.filter_expr)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(adhoc_filter_args)
adhoc_filter_args.thrift_spec = (
None, # 0
(1, TType.I64, 'multilog_id', None, None, ), # 1
(2, TType.STRING, 'filter_expr', 'UTF8', None, ), # 2
)
class adhoc_filter_result(object):
"""
Attributes:
- success
- ex
"""
def __init__(self, success=None, ex=None,):
self.success = success
self.ex = ex
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = rpc_iterator_handle()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ex = rpc_invalid_operation()
self.ex.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('adhoc_filter_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.ex is not None:
oprot.writeFieldBegin('ex', TType.STRUCT, 1)
self.ex.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(adhoc_filter_result)
adhoc_filter_result.thrift_spec = (
(0, TType.STRUCT, 'success', [rpc_iterator_handle, None], None, ), # 0
(1, TType.STRUCT, 'ex', [rpc_invalid_operation, None], None, ), # 1
)
class predef_filter_args(object):
"""
Attributes:
- multilog_id
- filter_name
- begin_ms
- end_ms
"""
def __init__(self, multilog_id=None, filter_name=None, begin_ms=None, end_ms=None,):
self.multilog_id = multilog_id
self.filter_name = filter_name
self.begin_ms = begin_ms
self.end_ms = end_ms
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.multilog_id = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.filter_name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.begin_ms = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.end_ms = iprot.readI64()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('predef_filter_args')
if self.multilog_id is not None:
oprot.writeFieldBegin('multilog_id', TType.I64, 1)
oprot.writeI64(self.multilog_id)
oprot.writeFieldEnd()
if self.filter_name is not None:
oprot.writeFieldBegin('filter_name', TType.STRING, 2)
oprot.writeString(self.filter_name.encode('utf-8') if sys.version_info[0] == 2 else self.filter_name)
oprot.writeFieldEnd()
if self.begin_ms is not None:
oprot.writeFieldBegin('begin_ms', TType.I64, 3)
oprot.writeI64(self.begin_ms)
oprot.writeFieldEnd()
if self.end_ms is not None:
oprot.writeFieldBegin('end_ms', TType.I64, 4)
oprot.writeI64(self.end_ms)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(predef_filter_args)
predef_filter_args.thrift_spec = (
None, # 0
(1, TType.I64, 'multilog_id', None, None, ), # 1
(2, TType.STRING, 'filter_name', 'UTF8', None, ), # 2
(3, TType.I64, 'begin_ms', None, None, ), # 3
(4, TType.I64, 'end_ms', None, None, ), # 4
)
class predef_filter_result(object):
"""
Attributes:
- success
- ex
"""
def __init__(self, success=None, ex=None,):
self.success = success
self.ex = ex
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = rpc_iterator_handle()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ex = rpc_invalid_operation()
self.ex.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('predef_filter_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.ex is not None:
oprot.writeFieldBegin('ex', TType.STRUCT, 1)
self.ex.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(predef_filter_result)
predef_filter_result.thrift_spec = (
(0, TType.STRUCT, 'success', [rpc_iterator_handle, None], None, ), # 0
(1, TType.STRUCT, 'ex', [rpc_invalid_operation, None], None, ), # 1
)
class combined_filter_args(object):
"""
Attributes:
- multilog_id
- filter_name
- filter_expr
- begin_ms
- end_ms
"""
def __init__(self, multilog_id=None, filter_name=None, filter_expr=None, begin_ms=None, end_ms=None,):
self.multilog_id = multilog_id
self.filter_name = filter_name
self.filter_expr = filter_expr
self.begin_ms = begin_ms
self.end_ms = end_ms
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.multilog_id = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.filter_name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.filter_expr = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.begin_ms = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I64:
self.end_ms = iprot.readI64()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('combined_filter_args')
if self.multilog_id is not None:
oprot.writeFieldBegin('multilog_id', TType.I64, 1)
oprot.writeI64(self.multilog_id)
oprot.writeFieldEnd()
if self.filter_name is not None:
oprot.writeFieldBegin('filter_name', TType.STRING, 2)
oprot.writeString(self.filter_name.encode('utf-8') if sys.version_info[0] == 2 else self.filter_name)
oprot.writeFieldEnd()
if self.filter_expr is not None:
oprot.writeFieldBegin('filter_expr', TType.STRING, 3)
oprot.writeString(self.filter_expr.encode('utf-8') if sys.version_info[0] == 2 else self.filter_expr)
oprot.writeFieldEnd()
if self.begin_ms is not None:
oprot.writeFieldBegin('begin_ms', TType.I64, 4)
oprot.writeI64(self.begin_ms)
oprot.writeFieldEnd()
if self.end_ms is not None:
oprot.writeFieldBegin('end_ms', TType.I64, 5)
oprot.writeI64(self.end_ms)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(combined_filter_args)
combined_filter_args.thrift_spec = (
None, # 0
(1, TType.I64, 'multilog_id', None, None, ), # 1
(2, TType.STRING, 'filter_name', 'UTF8', None, ), # 2
(3, TType.STRING, 'filter_expr', 'UTF8', None, ), # 3
(4, TType.I64, 'begin_ms', None, None, ), # 4
(5, TType.I64, 'end_ms', None, None, ), # 5
)
class combined_filter_result(object):
"""
Attributes:
- success
- ex
"""
def __init__(self, success=None, ex=None,):
self.success = success
self.ex = ex
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = rpc_iterator_handle()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ex = rpc_invalid_operation()
self.ex.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('combined_filter_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.ex is not None:
oprot.writeFieldBegin('ex', TType.STRUCT, 1)
self.ex.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(combined_filter_result)
combined_filter_result.thrift_spec = (
(0, TType.STRUCT, 'success', [rpc_iterator_handle, None], None, ), # 0
(1, TType.STRUCT, 'ex', [rpc_invalid_operation, None], None, ), # 1
)
class alerts_by_time_args(object):
"""
Attributes:
- multilog_id
- begin_ms
- end_ms
"""
def __init__(self, multilog_id=None, begin_ms=None, end_ms=None,):
self.multilog_id = multilog_id
self.begin_ms = begin_ms
self.end_ms = end_ms
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.multilog_id = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I64:
self.begin_ms = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.end_ms = iprot.readI64()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('alerts_by_time_args')
if self.multilog_id is not None:
oprot.writeFieldBegin('multilog_id', TType.I64, 1)
oprot.writeI64(self.multilog_id)
oprot.writeFieldEnd()
if self.begin_ms is not None:
oprot.writeFieldBegin('begin_ms', TType.I64, 2)
oprot.writeI64(self.begin_ms)
oprot.writeFieldEnd()
if self.end_ms is not None:
oprot.writeFieldBegin('end_ms', TType.I64, 3)
oprot.writeI64(self.end_ms)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(alerts_by_time_args)
alerts_by_time_args.thrift_spec = (
None, # 0
(1, TType.I64, 'multilog_id', None, None, ), # 1
(2, TType.I64, 'begin_ms', None, None, ), # 2
(3, TType.I64, 'end_ms', None, None, ), # 3
)
class alerts_by_time_result(object):
"""
Attributes:
- success
- ex
"""
def __init__(self, success=None, ex=None,):
self.success = success
self.ex = ex
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = rpc_iterator_handle()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ex = rpc_invalid_operation()
self.ex.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('alerts_by_time_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.ex is not None:
oprot.writeFieldBegin('ex', TType.STRUCT, 1)
self.ex.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(alerts_by_time_result)
alerts_by_time_result.thrift_spec = (
(0, TType.STRUCT, 'success', [rpc_iterator_handle, None], None, ), # 0
(1, TType.STRUCT, 'ex', [rpc_invalid_operation, None], None, ), # 1
)
class alerts_by_trigger_and_time_args(object):
"""
Attributes:
- multilog_id
- trigger_name
- begin_ms
- end_ms
"""
def __init__(self, multilog_id=None, trigger_name=None, begin_ms=None, end_ms=None,):
self.multilog_id = multilog_id
self.trigger_name = trigger_name
self.begin_ms = begin_ms
self.end_ms = end_ms
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.multilog_id = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.trigger_name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.begin_ms = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.end_ms = iprot.readI64()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('alerts_by_trigger_and_time_args')
if self.multilog_id is not None:
oprot.writeFieldBegin('multilog_id', TType.I64, 1)
oprot.writeI64(self.multilog_id)
oprot.writeFieldEnd()
if self.trigger_name is not None:
oprot.writeFieldBegin('trigger_name', TType.STRING, 2)
oprot.writeString(self.trigger_name.encode('utf-8') if sys.version_info[0] == 2 else self.trigger_name)
oprot.writeFieldEnd()
if self.begin_ms is not None:
oprot.writeFieldBegin('begin_ms', TType.I64, 3)
oprot.writeI64(self.begin_ms)
oprot.writeFieldEnd()
if self.end_ms is not None:
oprot.writeFieldBegin('end_ms', TType.I64, 4)
oprot.writeI64(self.end_ms)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(alerts_by_trigger_and_time_args)
alerts_by_trigger_and_time_args.thrift_spec = (
None, # 0
(1, TType.I64, 'multilog_id', None, None, ), # 1
(2, TType.STRING, 'trigger_name', 'UTF8', None, ), # 2
(3, TType.I64, 'begin_ms', None, None, ), # 3
(4, TType.I64, 'end_ms', None, None, ), # 4
)
class alerts_by_trigger_and_time_result(object):
"""
Attributes:
- success
- ex
"""
def __init__(self, success=None, ex=None,):
self.success = success
self.ex = ex
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = rpc_iterator_handle()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ex = rpc_invalid_operation()
self.ex.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('alerts_by_trigger_and_time_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.ex is not None:
oprot.writeFieldBegin('ex', TType.STRUCT, 1)
self.ex.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(alerts_by_trigger_and_time_result)
alerts_by_trigger_and_time_result.thrift_spec = (
(0, TType.STRUCT, 'success', [rpc_iterator_handle, None], None, ), # 0
(1, TType.STRUCT, 'ex', [rpc_invalid_operation, None], None, ), # 1
)
class get_more_args(object):
"""
Attributes:
- multilog_id
- desc
"""
def __init__(self, multilog_id=None, desc=None,):
self.multilog_id = multilog_id
self.desc = desc
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.multilog_id = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.desc = rpc_iterator_descriptor()
self.desc.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('get_more_args')
if self.multilog_id is not None:
oprot.writeFieldBegin('multilog_id', TType.I64, 1)
oprot.writeI64(self.multilog_id)
oprot.writeFieldEnd()
if self.desc is not None:
oprot.writeFieldBegin('desc', TType.STRUCT, 2)
self.desc.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(get_more_args)
get_more_args.thrift_spec = (
None, # 0
(1, TType.I64, 'multilog_id', None, None, ), # 1
(2, TType.STRUCT, 'desc', [rpc_iterator_descriptor, None], None, ), # 2
)
class get_more_result(object):
"""
Attributes:
- success
- ex
"""
def __init__(self, success=None, ex=None,):
self.success = success
self.ex = ex
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = rpc_iterator_handle()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ex = rpc_invalid_operation()
self.ex.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('get_more_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.ex is not None:
oprot.writeFieldBegin('ex', TType.STRUCT, 1)
self.ex.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(get_more_result)
get_more_result.thrift_spec = (
(0, TType.STRUCT, 'success', [rpc_iterator_handle, None], None, ), # 0
(1, TType.STRUCT, 'ex', [rpc_invalid_operation, None], None, ), # 1
)
class num_records_args(object):
"""
Attributes:
- multilog_id
"""
def __init__(self, multilog_id=None,):
self.multilog_id = multilog_id
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.multilog_id = iprot.readI64()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('num_records_args')
if self.multilog_id is not None:
oprot.writeFieldBegin('multilog_id', TType.I64, 1)
oprot.writeI64(self.multilog_id)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(num_records_args)
num_records_args.thrift_spec = (
None, # 0
(1, TType.I64, 'multilog_id', None, None, ), # 1
)
class num_records_result(object):
"""
Attributes:
- success
"""
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I64:
self.success = iprot.readI64()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('num_records_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I64, 0)
oprot.writeI64(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(num_records_result)
num_records_result.thrift_spec = (
(0, TType.I64, 'success', None, None, ), # 0
)
fix_spec(all_structs)
del all_structs
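# A minimal, hypothetical usage sketch for the service whose args/result
# structs are defined above (it assumes the Client class generated earlier in
# this module, per standard Thrift codegen; host, port, the filter expression,
# and the handle's descriptor field name are illustrative, not confirmed here):
#
#   from thrift.transport import TSocket, TTransport
#   from thrift.protocol import TBinaryProtocol
#
#   transport = TTransport.TBufferedTransport(TSocket.TSocket('localhost', 9090))
#   protocol = TBinaryProtocol.TBinaryProtocol(transport)
#   client = Client(protocol)
#   transport.open()
#   handle = client.adhoc_filter(multilog_id, 'msg_type == 1')  # rpc_iterator_handle
#   more = client.get_more(multilog_id, desc)  # desc: rpc_iterator_descriptor from the handle
#   transport.close()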
[per-record statistic and quality-signal columns omitted]

---- next record ----
hexsha: 990afd8dc2055e712eecff979471b3367ad014e9 | size: 89,798 | ext: py | lang: Python
path: exposan/bwaise/comparison/uncertainty/treatment.py
repo: QSD-Group/EXPOsan @ 8419f795b366901dbaba4168dac74df9a54d79eb | licenses: ["Unlicense"]
stars: 9 (2021-03-19T04:13:05Z to 2022-02-22T17:19:17Z) | issues: 17 (2022-01-31T23:12:50Z to 2022-03-31T14:48:55Z) | forks: 2 (2021-04-29T13:19:17Z to 2021-08-14T17:15:35Z)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 15 17:06:29 2018
@author: John Trimmer; Yalin Li (minor modification)
"""
# These functions track resources, costs, and emissions for various centralized treatment options
# (i) anaerobic_digestion
# (ii) sedimentation (used at the existing Lubigi plant)
# (iii) sludge_separator (used to estimate solid/liquid separation after anaerobic treatment in the alternative plant)
# (iv) anaerobic_lagoon (used at the existing Lubigi plant)
# (v) facultative_lagoon (used at the existing Lubigi plant)
# (vi) unplanted_drying_bed (used at the existing Lubigi plant)
# (vii) drying_beds_alt (drying beds of a different design for the anaerobic treatment plant)
# (viii) ABR (anaerobic baffled reactor used in the alternative plant)
# (ix) secondary_liquid_bed (planted bed used for treatment of ABR liquid effluent in alternative plant)
import numpy as np
import pandas as pd
import copy
import lhs
import math
def first_order_decay(k, t0, t, max_decay, tot=1):
tf = t0 + t
Cdeg = tot * max_decay
Cavg = Cdeg/(k*t) * (np.exp(-k*t0)-np.exp(-k*tf))
loss = Cdeg - Cavg
return loss
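# The helper above implements time-averaged first-order decay over a storage
# window: of the degradable fraction Cdeg = tot*max_decay entering at age t0,
# the amount remaining, averaged over the window [t0, t0 + t], is
# Cavg = Cdeg/(k*t)*(exp(-k*t0) - exp(-k*(t0 + t))), and loss = Cdeg - Cavg.
# Worked example with illustrative values (not taken from the study):
#   >>> first_order_decay(k=0.3, t0=0.5, t=0.1, max_decay=0.8, tot=1.0)
#   ~0.12  (about 12% of the total is lost during this stage)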
#%% anaerobic digestion function
def anaerobic_digestion(inputs, construction_cost, operating_cost, direct_emissions, tech_construction_emissions, tech_operating_emissions, biogas, parameters, correlation_distributions, correlation_parameters, n_samples, rate_constant, maximum_methane_emission, CH4_GWP, N2O_GWP, CH4_energy, previous_storage_time, additional_storage, sludge_flow_alt, concrete_thickness, concrete_IF_GHG, excavation_IF_GHG, plant_lifetime, sludge_pop_alt, discount_rate):
outputs = copy.deepcopy(inputs)
mass = np.reshape(inputs[:,0], (-1,1))
mass_dry = np.reshape(inputs[:,1], (-1,1))
N_total = np.reshape(inputs[:,2], (-1,1))
P_total = np.reshape(inputs[:,3], (-1,1))
K_total = np.reshape(inputs[:,4], (-1,1))
Mg_total = np.reshape(inputs[:,5], (-1,1))
Ca_total = np.reshape(inputs[:,6], (-1,1))
energy = np.reshape(inputs[:,7], (-1,1))
N_ammonia = np.reshape(inputs[:,8], (-1,1))
# biogas production
# parameters
MCF, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.MCF_AD, correlation_distributions, correlation_parameters, n_samples)
COD_removal_AD, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.COD_removal_AD, correlation_distributions, correlation_parameters, n_samples)
residence_time_AD, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.residence_time_AD, correlation_distributions, correlation_parameters, n_samples)
# calculations
# carbon (kg COD/yr; assume 14 kJ/g COD in wastewater)
COD_total = (energy/14/1000)
COD_degrade = COD_total*(COD_removal_AD/100)
CH4_production = COD_degrade*(MCF/100)*maximum_methane_emission
COD_total = COD_total - COD_degrade
energy = COD_total*14*1000
if parameters.CH4_captured_AD.expected == 'yes':
CH4eq = np.full([n_samples, 1], 0)
else:
CH4eq = CH4_production*CH4_GWP
CH4_production = 0
# nitrogen emissions (kg N/yr; N2O expressed as kg N2O/yr)
if parameters.N_emission_in_biogas_AD.expected == 'yes':
N2O_EF, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.N2O_EF_AD, correlation_distributions, correlation_parameters, n_samples)
N_denitrification, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.N_max_denitrification_AD, correlation_distributions, correlation_parameters, n_samples)
#!!! Changed algorithm
t0 = previous_storage_time + additional_storage
t = residence_time_AD / 365
N_loss = first_order_decay(rate_constant, t0, t,
max_decay=N_denitrification/100,
tot=N_total)
# N_degradable = N_total * (N_denitrification/100)
# N_initial = (N_degradable*rate_constant*(previous_storage_time))/(np.exp(-rate_constant*(additional_storage)) - np.exp(-rate_constant*(additional_storage+previous_storage_time)))
# N_after = (N_initial/(rate_constant*(previous_storage_time)))*(np.exp(-rate_constant*(additional_storage+residence_time_AD/365)) - np.exp(-rate_constant*(previous_storage_time + additional_storage + residence_time_AD/365)))
# N_loss = N_degradable - N_after
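# 44/28 converts a mass of N (2 x 14 g/mol N in N2O) to the equivalent mass of
# N2O (44 g/mol); N2O_EF is the percentage of the denitrified N emitted as N2O.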
N2O_emission = N_loss*(N2O_EF/100)*(44/28)
N2Oeq = N2O_emission*N2O_GWP
N_total = N_total - N_loss
N_ammonia = N_ammonia - N_loss
for i in range(0, len(N_ammonia)):
if N_ammonia[i] < 0:
N_ammonia[i] = 0
else:
N2Oeq = np.full([n_samples, 1], 0)
# solids loss (based on COD loss)
mass = mass - mass_dry * (COD_removal_AD/100)
mass_dry = mass_dry - mass_dry * (COD_removal_AD/100)
# energy in biogas (kJ/yr)
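# unit sketch (assumed units): (kg CH4/yr) / (16 kg/kmol) * CH4_energy [kJ/mol]
# * 1000 mol/kmol = kJ/yr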
biogas_new = CH4_production / 16 * CH4_energy * 1000
biogas = biogas + biogas_new
outputs[:,0:9] = np.concatenate((mass, mass_dry, N_total, P_total, K_total,
Mg_total, Ca_total, energy, N_ammonia), 1)
# construction costs and emissions
number, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.number_AD, correlation_distributions, correlation_parameters, n_samples)
aspect_ratio, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.aspect_ratio_AD, correlation_distributions, correlation_parameters, n_samples)
headspace, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.headspace_AD, correlation_distributions, correlation_parameters, n_samples)
reactor_volume = ((residence_time_AD*sludge_flow_alt)/(number-1))/(1 - headspace/100)
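# cylinder geometry: V = (pi/4)*d^2*h with h = d/aspect_ratio, so
# d = (4*V*aspect_ratio/pi)**(1/3); headspace inflates the working volume above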
reactor_diameter = ((4*reactor_volume*aspect_ratio)/math.pi)**(1/3)
reactor_height = reactor_diameter/aspect_ratio
concrete_volume = number*(concrete_thickness*((2*(math.pi/4)*(reactor_diameter**2))+(reactor_height*math.pi*reactor_diameter)))
concrete_emissions = concrete_volume * concrete_IF_GHG
excavation_volume = reactor_volume * number
excavation_emissions = excavation_volume * excavation_IF_GHG
construction_emissions_annual = (concrete_emissions + excavation_emissions)/plant_lifetime/sludge_pop_alt
tech_construction_emissions[:,3:4] = tech_construction_emissions[:,3:4] + construction_emissions_annual
direct_emissions[:,3:4] = direct_emissions[:,3:4] + CH4eq + N2Oeq
# cost
if parameters.use_total_price.expected == 'no':
concrete_cost, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.concrete_cost, correlation_distributions, correlation_parameters, n_samples)
steel_cost, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.steel_cost, correlation_distributions, correlation_parameters, n_samples)
# note: use a separate local (cap_cost) so the construction_cost accumulator
# passed into this function is not overwritten; the original assigned to
# construction_cost directly and also summed concrete_volume with itself
# (an apparent copy-paste slip; steel_cost is sampled above but unused here)
cap_cost = concrete_volume*concrete_cost
opex_percent, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.alternative_plant_opex, correlation_distributions, correlation_parameters, n_samples)
annual_opex = (cap_cost*(opex_percent/100)) / sludge_pop_alt
capex_annualized = (cap_cost * ((discount_rate*(1 + discount_rate)**plant_lifetime)/(((1 + discount_rate)**plant_lifetime) - 1))) / sludge_pop_alt
construction_cost[:,3:4] = construction_cost[:,3:4] + capex_annualized
operating_cost[:,3:4] = operating_cost[:,3:4] + annual_opex
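# capex_annualized above applies the standard capital recovery factor
# CRF = i*(1+i)^n / ((1+i)^n - 1); e.g., i = 0.05, n = 20 yr gives CRF ~ 0.080,
# i.e., the annualized capital cost is ~8% of the up-front cost per year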
electricity_cost, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.electricity_cost, correlation_distributions, correlation_parameters, n_samples)
electricity_GHG, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.electricity_GHG, correlation_distributions, correlation_parameters, n_samples)
# assume all operating expenses come from electricity
annual_kWh = annual_opex / electricity_cost
operating_emissions = (annual_kWh * electricity_GHG)
tech_operating_emissions[:,3:4] = tech_operating_emissions[:,3:4] + operating_emissions
additional_storage = additional_storage + residence_time_AD/365
return outputs, construction_cost, operating_cost, direct_emissions, tech_construction_emissions, tech_operating_emissions, biogas, correlation_distributions, correlation_parameters, additional_storage
#%% sedimentation function
def sedimentation(inputs, construction_cost, operating_cost, direct_emissions, tech_construction_emissions, tech_operating_emissions, parameters, correlation_distributions, correlation_parameters, n_samples, rate_constant, maximum_methane_emission, CH4_GWP, N2O_GWP, previous_storage_time, additional_storage, concrete_thickness, roof_slope, roof_mass, concrete_IF_GHG, steel_IF_GHG, plant_lifetime, existing_population, discount_rate):
solids_outputs = copy.deepcopy(inputs)
liquid_outputs = copy.deepcopy(inputs)
mass = np.reshape(inputs[:,0], (-1,1))
mass_dry = np.reshape(inputs[:,1], (-1,1))
N_total = np.reshape(inputs[:,2], (-1,1))
P_total = np.reshape(inputs[:,3], (-1,1))
K_total = np.reshape(inputs[:,4], (-1,1))
Mg_total = np.reshape(inputs[:,5], (-1,1))
Ca_total = np.reshape(inputs[:,6], (-1,1))
energy = np.reshape(inputs[:,7], (-1,1))
N_ammonia = np.reshape(inputs[:,8], (-1,1))
# calculate retention in settled solids (before degradation)
COD_retention_sedimentation, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.COD_retention_sedimentation, correlation_distributions, correlation_parameters, n_samples)
TS_retention_sedimentation, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.TS_retention_sedimentation, correlation_distributions, correlation_parameters, n_samples)
N_retention_sedimentation, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.N_retention_sedimentation, correlation_distributions, correlation_parameters, n_samples)
P_retention_sedimentation, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.P_retention_sedimentation, correlation_distributions, correlation_parameters, n_samples)
K_retention_sedimentation, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.K_retention_sedimentation, correlation_distributions, correlation_parameters, n_samples)
Mg_retention_sedimentation, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.Mg_retention_sedimentation, correlation_distributions, correlation_parameters, n_samples)
Ca_retention_sedimentation, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.Ca_retention_sedimentation, correlation_distributions, correlation_parameters, n_samples)
solids_COD = (energy * (COD_retention_sedimentation/100))/14/1000
solids_TS = mass_dry * (TS_retention_sedimentation/100)
solids_N = N_total * (N_retention_sedimentation/100)
solids_P = P_total * (P_retention_sedimentation/100)
solids_K = K_total * (K_retention_sedimentation/100)
solids_Mg = Mg_total * (Mg_retention_sedimentation/100)
solids_Ca = Ca_total * (Ca_retention_sedimentation/100)
liquid_COD = (energy * ((100 - COD_retention_sedimentation)/100))/14/1000
liquid_TS = mass_dry * ((100 - TS_retention_sedimentation)/100)
liquid_N = N_total * ((100 - N_retention_sedimentation)/100)
liquid_P = P_total * ((100 - P_retention_sedimentation)/100)
liquid_K = K_total * ((100 - K_retention_sedimentation)/100)
liquid_Mg = Mg_total * ((100 - Mg_retention_sedimentation)/100)
liquid_Ca = Ca_total * ((100 - Ca_retention_sedimentation)/100)
# assume as much ammonia as possible drains with liquid
liquid_N_ammonia = np.full(np.shape(N_ammonia), np.nan)
solids_N_ammonia = np.full(np.shape(N_ammonia), np.nan)
N_ammonia_temp = copy.deepcopy(N_ammonia)
liquid_N_temp = copy.deepcopy(liquid_N)
for i in range(0, len(N_ammonia_temp)):
if N_ammonia_temp[i] <= liquid_N[i]:
liquid_N_ammonia[i] = N_ammonia_temp[i]
solids_N_ammonia[i] = 0
else:
liquid_N_ammonia[i] = liquid_N_temp[i]
solids_N_ammonia[i] = N_ammonia_temp[i] - liquid_N_temp[i]
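# (the loop above is equivalent to the vectorized forms
#  liquid_N_ammonia = np.minimum(N_ammonia, liquid_N) and
#  solids_N_ammonia = np.maximum(N_ammonia - liquid_N, 0))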
# calculate degradation in settled solids
MCF, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.MCF_sedimentation, correlation_distributions, correlation_parameters, n_samples)
retention_time_sedimentation, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.retention_time_sedimentation, correlation_distributions, correlation_parameters, n_samples)
COD_degradation, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.COD_degradation_sedimentation, correlation_distributions, correlation_parameters, n_samples)
#!!! Changed algorithm
t0 = previous_storage_time + additional_storage
t = retention_time_sedimentation / 365
COD_loss = first_order_decay(rate_constant, t0, t,
max_decay=COD_degradation/100,
tot=solids_COD)
# COD_degradable = solids_COD * COD_degradation/100
# COD_initial = (COD_degradable*rate_constant*previous_storage_time)/(np.exp(-rate_constant*(additional_storage)) - np.exp(-rate_constant*(additional_storage+previous_storage_time)))
# COD_after = (COD_initial/(rate_constant*(previous_storage_time)))*(np.exp(-rate_constant*(additional_storage+retention_time_sedimentation/365)) - np.exp(-rate_constant*(previous_storage_time + additional_storage + retention_time_sedimentation/365)))
# COD_loss = COD_degradable - COD_after
COD_reduction = COD_loss/solids_COD
CH4_emission = COD_loss*(MCF/100)*maximum_methane_emission
CH4eq = CH4_emission*CH4_GWP
solids_COD = solids_COD - COD_loss
solids_energy = solids_COD*14*1000
liquid_energy = liquid_COD*14*1000
# solids loss from COD degradation
mass = mass - solids_TS*COD_reduction
solids_TS = solids_TS - solids_TS*COD_reduction
if parameters.N_emission_from_sedimentation.expected == 'yes':
N2O_EF, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.N2O_EF_sedimentation, correlation_distributions, correlation_parameters, n_samples)
N_denitrification, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.N_max_denitrification_sedimentation, correlation_distributions, correlation_parameters, n_samples)
#!!! Changed algorithm
t0 = previous_storage_time + additional_storage
t = retention_time_sedimentation / 365
N_loss = first_order_decay(rate_constant, t0, t,
max_decay=N_denitrification/100,
tot=N_total)
N2O_emission = N_loss*(N2O_EF/100)*(44/28)
# N_degradable = solids_N * (N_denitrification/100)
# N_initial = (N_degradable*rate_constant*(previous_storage_time))/(np.exp(-rate_constant*(additional_storage)) - np.exp(-rate_constant*(additional_storage+previous_storage_time)))
# N_after = (N_initial/(rate_constant*(previous_storage_time)))*(np.exp(-rate_constant*(additional_storage+retention_time_sedimentation/365)) - np.exp(-rate_constant*(previous_storage_time + additional_storage + retention_time_sedimentation/365)))
# N_loss = N_degradable - N_after
# N2O_emission = solids_N*(N_loss/N_degradable)*(N2O_EF/100)*(44/28)
for i in range(0, len(N2O_emission)):
if N2O_emission[i] > N_loss[i]*(44/28):
N2O_emission[i] = N_loss[i]*(44/28)
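# (equivalent to N2O_emission = np.minimum(N2O_emission, N_loss*(44/28)),
#  capping N2O at full conversion of the lost N)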
N2Oeq = N2O_emission*N2O_GWP
solids_N = solids_N - N_loss
solids_N_ammonia = solids_N_ammonia - N_loss
for i in range(0, len(solids_N_ammonia)):
if solids_N_ammonia[i] < 0:
solids_N_ammonia[i] = 0
else:
N2Oeq = np.full([n_samples, 1], 0)
# calculate total mass of settled solids based on final solids content
final_solids_content, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.final_solids_content_sedimentation, correlation_distributions, correlation_parameters, n_samples)
liquid = np.full(np.shape(final_solids_content), np.nan)
solids = np.full(np.shape(final_solids_content), np.nan)
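# if the target final solids content exceeds the current solids fraction,
# water is assumed to drain from the settled solids (joining the liquid
# stream) until that content is reached; otherwise nothing drains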
for i in range(0, len(solids_TS)):
if final_solids_content[i] > ((solids_TS[i]/mass[i])*100):
solids[i] = solids_TS[i] / (final_solids_content[i]/100)
drained_water = mass[i] - solids[i]
liquid[i] = drained_water + liquid_TS[i]
else:
solids[i] = mass[i]
liquid[i] = liquid_TS[i]
# construction costs and emissions
volume, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.volume_sedimentation_tank, correlation_distributions, correlation_parameters, n_samples)
length_width_ratio, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.length_width_ratio_sedimentation, correlation_distributions, correlation_parameters, n_samples)
width_height_ratio, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.width_height_ratio_sedimentation, correlation_distributions, correlation_parameters, n_samples)
number, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.number_sedimentation_tanks, correlation_distributions, correlation_parameters, n_samples)
columns_per_side, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.columns_per_side_sedimentation, correlation_distributions, correlation_parameters, n_samples)
average_height = (volume/(length_width_ratio*(width_height_ratio**2)))**(1/3)
width = average_height * width_height_ratio
length = width * length_width_ratio
concrete_volume = (concrete_thickness*(width*length + 2*width*average_height + 2*length*average_height))*number
concrete_columns = (concrete_thickness**2)*(average_height)*(columns_per_side*2)*number
concrete_emissions = (concrete_volume + concrete_columns)*concrete_IF_GHG
roof_area = np.full(np.shape(roof_slope), np.nan)
for i in range(0,len(roof_slope)):
roof_area[i] = (number[i]*length[i]*width[i])/math.cos(roof_slope[i]*math.pi/180)
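# (loop equivalent to roof_area = (number*length*width)/np.cos(roof_slope*np.pi/180),
#  since np.cos broadcasts over arrays)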
siding_area = number*(2*length*average_height + 2*width*average_height)
mass_steel = (roof_area + siding_area)*roof_mass
steel_emissions = mass_steel*steel_IF_GHG
construction_emissions_annual = (concrete_emissions + steel_emissions)/plant_lifetime/existing_population
tech_construction_emissions[:,3:4] = tech_construction_emissions[:,3:4] + construction_emissions_annual
solids_outputs[:,0:9] = np.concatenate((solids, solids_TS, solids_N, solids_P, solids_K,
solids_Mg, solids_Ca, solids_energy, solids_N_ammonia), 1)
liquid_outputs[:,0:9] = np.concatenate((liquid, liquid_TS, liquid_N, liquid_P, liquid_K,
liquid_Mg, liquid_Ca, liquid_energy, liquid_N_ammonia), 1)
direct_emissions[:,3:4] = direct_emissions[:,3:4] + CH4eq + N2Oeq
# cost
if parameters.use_total_price.expected == 'no':
concrete_cost, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.concrete_cost, correlation_distributions, correlation_parameters, n_samples)
steel_cost, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.steel_cost, correlation_distributions, correlation_parameters, n_samples)
cap_cost = (concrete_volume + concrete_columns)*concrete_cost + mass_steel*steel_cost
opex_percent, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.existing_plant_opex, correlation_distributions, correlation_parameters, n_samples)
annual_opex = (cap_cost*(opex_percent/100)) / existing_population
capex_annualized = (cap_cost * ((discount_rate*(1 + discount_rate)**plant_lifetime)/(((1 + discount_rate)**plant_lifetime) - 1))) / existing_population
construction_cost[:,3:4] = construction_cost[:,3:4] + capex_annualized
operating_cost[:,3:4] = operating_cost[:,3:4] + annual_opex
electricity_cost, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.electricity_cost, correlation_distributions, correlation_parameters, n_samples)
electricity_GHG, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.electricity_GHG, correlation_distributions, correlation_parameters, n_samples)
# assume all operating expenses come from electricity
annual_kWh = annual_opex / electricity_cost
operating_emissions = (annual_kWh * electricity_GHG)
tech_operating_emissions[:,3:4] = tech_operating_emissions[:,3:4] + operating_emissions
solid_additional_storage = additional_storage + retention_time_sedimentation/365
liquid_additional_storage = additional_storage
return liquid_outputs, solids_outputs, construction_cost, operating_cost, direct_emissions, tech_construction_emissions, tech_operating_emissions, correlation_distributions, correlation_parameters, liquid_additional_storage, solid_additional_storage
#%% sludge separator function
def sludge_separator(inputs, direct_emissions, parameters, correlation_distributions, correlation_parameters, n_samples, rate_constant, maximum_methane_emission, CH4_GWP, N2O_GWP, previous_storage_time, additional_storage):
solids_outputs = copy.deepcopy(inputs)
liquid_outputs = copy.deepcopy(inputs)
mass = np.reshape(inputs[:,0], (-1,1))
mass_dry = np.reshape(inputs[:,1], (-1,1))
N_total = np.reshape(inputs[:,2], (-1,1))
P_total = np.reshape(inputs[:,3], (-1,1))
K_total = np.reshape(inputs[:,4], (-1,1))
Mg_total = np.reshape(inputs[:,5], (-1,1))
Ca_total = np.reshape(inputs[:,6], (-1,1))
energy = np.reshape(inputs[:,7], (-1,1))
N_ammonia = np.reshape(inputs[:,8], (-1,1))
# calculate retention in settled solids (before degradation)
COD_retention, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.COD_retention_separator, correlation_distributions, correlation_parameters, n_samples)
TS_retention, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.TS_retention_separator, correlation_distributions, correlation_parameters, n_samples)
N_retention, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.N_retention_separator, correlation_distributions, correlation_parameters, n_samples)
P_retention, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.P_retention_separator, correlation_distributions, correlation_parameters, n_samples)
K_retention, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.K_retention_separator, correlation_distributions, correlation_parameters, n_samples)
Mg_retention, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.Mg_retention_separator, correlation_distributions, correlation_parameters, n_samples)
Ca_retention, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.Ca_retention_separator, correlation_distributions, correlation_parameters, n_samples)
solids_COD = (energy * (COD_retention/100))/14/1000
solids_TS = mass_dry * (TS_retention/100)
solids_N = N_total * (N_retention/100)
solids_P = P_total * (P_retention/100)
solids_K = K_total * (K_retention/100)
solids_Mg = Mg_total * (Mg_retention/100)
solids_Ca = Ca_total * (Ca_retention/100)
liquid_COD = (energy * ((100 - COD_retention)/100))/14/1000
liquid_TS = mass_dry * ((100 - TS_retention)/100)
liquid_N = N_total * ((100 - N_retention)/100)
liquid_P = P_total * ((100 - P_retention)/100)
liquid_K = K_total * ((100 - K_retention)/100)
liquid_Mg = Mg_total * ((100 - Mg_retention)/100)
liquid_Ca = Ca_total * ((100 - Ca_retention)/100)
# assume as much ammonia as possible drains with liquid
liquid_N_ammonia = np.full(np.shape(N_ammonia), np.nan)
solids_N_ammonia = np.full(np.shape(N_ammonia), np.nan)
N_ammonia_temp = copy.deepcopy(N_ammonia)
liquid_N_temp = copy.deepcopy(liquid_N)
for i in range(0, len(N_ammonia_temp)):
if N_ammonia_temp[i] <= liquid_N[i]:
liquid_N_ammonia[i] = N_ammonia_temp[i]
solids_N_ammonia[i] = 0
else:
liquid_N_ammonia[i] = liquid_N_temp[i]
solids_N_ammonia[i] = N_ammonia_temp[i] - liquid_N_temp[i]
solids_energy = solids_COD*14*1000
liquid_energy = liquid_COD*14*1000
# calculate total mass of settled solids based on final solids content
final_solids_content, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.final_solids_content_sedimentation, correlation_distributions, correlation_parameters, n_samples)
liquid = np.full(np.shape(final_solids_content), np.nan)
solids = np.full(np.shape(final_solids_content), np.nan)
for i in range(0, len(solids_TS)):
if final_solids_content[i] > ((solids_TS[i]/mass[i])*100):
solids[i] = solids_TS[i] / (final_solids_content[i]/100)
drained_water = mass[i] - solids[i]
liquid[i] = drained_water + liquid_TS[i]
else:
solids[i] = mass[i]
liquid[i] = liquid_TS[i]
solids_outputs[:,0:9] = np.concatenate((solids, solids_TS, solids_N, solids_P, solids_K,
solids_Mg, solids_Ca, solids_energy, solids_N_ammonia), 1)
liquid_outputs[:,0:9] = np.concatenate((liquid, liquid_TS, liquid_N, liquid_P, liquid_K,
liquid_Mg, liquid_Ca, liquid_energy, liquid_N_ammonia), 1)
liquid_additional_storage = copy.deepcopy(additional_storage)
solid_additional_storage = copy.deepcopy(additional_storage)
return liquid_outputs, solids_outputs, direct_emissions, correlation_distributions, correlation_parameters, liquid_additional_storage, solid_additional_storage
#%% anaerobic lagoon function
def anaerobic_lagoon(inputs, direct_emissions, tech_construction_emissions, tech_operating_emissions, parameters, correlation_distributions, correlation_parameters, n_samples, rate_constant, maximum_methane_emission, CH4_GWP, N2O_GWP, previous_storage_time, additional_storage, flow_rate, excavation_IF_GHG, liner_mass, liner_IF_GHG, plant_lifetime, existing_population):
outputs = copy.deepcopy(inputs)
mass = np.reshape(inputs[:,0], (-1,1))
mass_dry = np.reshape(inputs[:,1], (-1,1))
N_total = np.reshape(inputs[:,2], (-1,1))
P_total = np.reshape(inputs[:,3], (-1,1))
K_total = np.reshape(inputs[:,4], (-1,1))
Mg_total = np.reshape(inputs[:,5], (-1,1))
Ca_total = np.reshape(inputs[:,6], (-1,1))
energy = np.reshape(inputs[:,7], (-1,1))
N_ammonia = np.reshape(inputs[:,8], (-1,1))
# parameters
COD_removal, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.COD_removal_anaerobic_lagoon, correlation_distributions, correlation_parameters, n_samples)
COD_degradation, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.COD_degradation_anaerobic_lagoon, correlation_distributions, correlation_parameters, n_samples)
MCF, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.MCF_anaerobic_lagoon, correlation_distributions, correlation_parameters, n_samples)
volume, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.volume_anaerobic_lagoon, correlation_distributions, correlation_parameters, n_samples)
number, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.number_anaerobic_lagoons, correlation_distributions, correlation_parameters, n_samples)
length, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.length_anaerobic_lagoon, correlation_distributions, correlation_parameters, n_samples)
width, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.width_anaerobic_lagoon, correlation_distributions, correlation_parameters, n_samples)
# calculations
retention_time = (volume*number)/flow_rate
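# retention_time is presumably in days (total lagoon volume / daily flow);
# it is converted to years below via division by 365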
COD_influent = energy/14/1000
COD_removed = COD_influent*(COD_removal/100)
mass = mass - mass_dry*(COD_removal/100)
mass_dry = mass_dry - mass_dry*(COD_removal/100) # assume solids removal is similar to COD removal
COD_loss = COD_removed * COD_degradation/100
CH4_emission = COD_loss*(MCF/100)*maximum_methane_emission
CH4eq = CH4_emission*CH4_GWP
COD_effluent = COD_influent - COD_removed
energy = COD_effluent*14*1000
if parameters.N_emission_from_anaerobic_lagoon.expected == 'yes':
N_denitrification, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.N_max_denitrification_anaerobic_lagoon, correlation_distributions, correlation_parameters, n_samples)
N2O_EF, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.N2O_EF_anaerobic_lagoon, correlation_distributions, correlation_parameters, n_samples)
#!!! Changed algorithm
t0 = previous_storage_time + additional_storage
t = retention_time / 365
N_loss = first_order_decay(rate_constant, t0, t,
max_decay=N_denitrification/100,
tot=N_total)
N2O_emission = N_loss*(N2O_EF/100)*(44/28)
# N_degradable = N_total * (N_denitrification/100)
# N_initial = (N_degradable*rate_constant*previous_storage_time)/(np.exp(-rate_constant*additional_storage) - np.exp(-rate_constant*(additional_storage+previous_storage_time)))
# N_after = (N_initial/(rate_constant*(previous_storage_time)))*(np.exp(-rate_constant*(additional_storage+retention_time/365)) - np.exp(-rate_constant*(previous_storage_time + additional_storage + retention_time/365)))
# N_loss = N_degradable - N_after
# N2O_emission = N_total*(N_loss/N_degradable)*(N2O_EF/100)*(44/28)
for i in range(0, len(N2O_emission)):
if N2O_emission[i] > N_loss[i]*(44/28):
N2O_emission[i] = N_loss[i]*(44/28)
N2Oeq = N2O_emission*N2O_GWP
N_total = N_total - N_loss
N_ammonia = N_ammonia - N_loss
for i in range(0, len(N_ammonia)):
if N_ammonia[i] < 0:
N_ammonia[i] = 0
else:
N2Oeq = np.full((n_samples,1), 0)
# construction costs and emissions
average_depth = volume/(length*width)
liner_area = ((length*width)+(average_depth*(2*length+2*width)))*number
excavation_emissions = (volume*number)*excavation_IF_GHG
liner_emissions = liner_area*liner_mass*liner_IF_GHG
construction_emissions_annual = (excavation_emissions+liner_emissions)/plant_lifetime/existing_population
tech_construction_emissions[:,3:4] = tech_construction_emissions[:,3:4] + construction_emissions_annual
outputs[:,0:9] = np.concatenate((mass, mass_dry, N_total, P_total, K_total,
Mg_total, Ca_total, energy, N_ammonia), 1)
direct_emissions[:,3:4] = direct_emissions[:,3:4] + CH4eq + N2Oeq
additional_storage = additional_storage + retention_time/365
return outputs, direct_emissions, tech_construction_emissions, tech_operating_emissions, correlation_distributions, correlation_parameters, additional_storage
#%% facultative lagoon function
def facultative_lagoon(inputs, direct_emissions, tech_construction_emissions, tech_operating_emissions, parameters, correlation_distributions, correlation_parameters, n_samples, rate_constant, maximum_methane_emission, CH4_GWP, N2O_GWP, previous_storage_time, additional_storage, flow_rate, excavation_IF_GHG, liner_mass, liner_IF_GHG, plant_lifetime, existing_population):
outputs = copy.deepcopy(inputs)
mass = np.reshape(inputs[:,0], (-1,1))
mass_dry = np.reshape(inputs[:,1], (-1,1))
N_total = np.reshape(inputs[:,2], (-1,1))
P_total = np.reshape(inputs[:,3], (-1,1))
K_total = np.reshape(inputs[:,4], (-1,1))
Mg_total = np.reshape(inputs[:,5], (-1,1))
Ca_total = np.reshape(inputs[:,6], (-1,1))
energy = np.reshape(inputs[:,7], (-1,1))
N_ammonia = np.reshape(inputs[:,8], (-1,1))
# parameters
COD_removal, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.COD_removal_facultative_lagoon, correlation_distributions, correlation_parameters, n_samples)
COD_degradation, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.COD_degradation_facultative_lagoon, correlation_distributions, correlation_parameters, n_samples)
MCF, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.MCF_facultative_lagoon, correlation_distributions, correlation_parameters, n_samples)
N2O_EF, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.N2O_EF_facultative_lagoon, correlation_distributions, correlation_parameters, n_samples)
N_denitrification, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.N_max_denitrification_facultative_lagoon, correlation_distributions, correlation_parameters, n_samples)
P_removal, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.P_removal_facultative_lagoon, correlation_distributions, correlation_parameters, n_samples)
volume, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.volume_facultative_lagoon, correlation_distributions, correlation_parameters, n_samples)
number, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.number_facultative_lagoons, correlation_distributions, correlation_parameters, n_samples)
length, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.length_facultative_lagoon, correlation_distributions, correlation_parameters, n_samples)
width, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.width_facultative_lagoon, correlation_distributions, correlation_parameters, n_samples)
# calculations
retention_time = (volume*number)/flow_rate
COD_influent = energy/14/1000
COD_removed = COD_influent*(COD_removal/100)
mass = mass - mass_dry*(COD_removal/100)
mass_dry = mass_dry - mass_dry*(COD_removal/100) # assume solids removal is similar to COD removal
COD_loss = COD_removed * COD_degradation/100
CH4_emission = COD_loss*(MCF/100)*maximum_methane_emission
CH4eq = CH4_emission*CH4_GWP
COD_effluent = COD_influent - COD_removed
energy = COD_effluent*14*1000
    # first-order decay of the degradable N pool over this stage's retention time
t0 = previous_storage_time + additional_storage
t = retention_time / 365
N_loss = first_order_decay(rate_constant, t0, t,
max_decay=N_denitrification/100,
tot=N_total)
N2O_emission = N_loss*(N2O_EF/100)*(44/28)
    N2O_emission = np.minimum(N2O_emission, N_loss*(44/28))  # cap N2O-N at the N actually lost
    N2Oeq = N2O_emission*N2O_GWP
    N_total = N_total - N_loss
    N_ammonia = np.maximum(N_ammonia - N_loss, 0)  # ammonia pool cannot go negative
P_loss = P_total * (P_removal/100)
P_total = P_total - P_loss
# construction costs and emissions
average_depth = volume/(length*width)
liner_area = ((length*width)+(average_depth*(2*length+2*width)))*number
excavation_emissions = (volume*number)*excavation_IF_GHG
liner_emissions = liner_area*liner_mass*liner_IF_GHG
construction_emissions_annual = (excavation_emissions+liner_emissions)/plant_lifetime/existing_population
tech_construction_emissions[:,3:4] = tech_construction_emissions[:,3:4] + construction_emissions_annual
outputs[:,0:9] = np.concatenate((mass, mass_dry, N_total, P_total, K_total,
Mg_total, Ca_total, energy, N_ammonia), 1)
additional_storage = additional_storage + retention_time/365
direct_emissions[:,3:4] = direct_emissions[:,3:4] + CH4eq + N2Oeq
return outputs, direct_emissions, tech_construction_emissions, tech_operating_emissions, correlation_distributions, correlation_parameters, additional_storage
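# Worked example of the N2O accounting used above (illustrative numbers):
# with N_loss = 10 kg N/yr and N2O_EF = 1 %,
#     N2O_emission = 10 * (1/100) * (44/28) ~= 0.157 kg N2O/yr,
# where 44/28 converts a mass of N to a mass of N2O (molar mass of N2O
# over the molar mass of the two N atoms it contains).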
#%% unplanted drying bed function
def unplanted_drying_bed(inputs, construction_cost, operating_cost, direct_emissions, tech_construction_emissions, tech_operating_emissions, parameters, correlation_distributions, correlation_parameters, n_samples, rate_constant, maximum_methane_emission, CH4_GWP, N2O_GWP, previous_storage_time, additional_storage, concrete_thickness, roof_slope, roof_mass, concrete_IF_GHG, steel_IF_GHG, plant_lifetime, existing_population, discount_rate):
outputs = copy.deepcopy(inputs)
mass = np.reshape(inputs[:,0], (-1,1))
mass_dry = np.reshape(inputs[:,1], (-1,1))
N_total = np.reshape(inputs[:,2], (-1,1))
P_total = np.reshape(inputs[:,3], (-1,1))
K_total = np.reshape(inputs[:,4], (-1,1))
Mg_total = np.reshape(inputs[:,5], (-1,1))
Ca_total = np.reshape(inputs[:,6], (-1,1))
energy = np.reshape(inputs[:,7], (-1,1))
N_ammonia = np.reshape(inputs[:,8], (-1,1))
# parameters
COD_degradation, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.COD_degradation_drying_bed, correlation_distributions, correlation_parameters, n_samples)
retention_time, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.retention_time_drying_bed, correlation_distributions, correlation_parameters, n_samples)
MCF, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.MCF_drying_bed, correlation_distributions, correlation_parameters, n_samples)
N2O_EF, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.N2O_EF_drying_bed, correlation_distributions, correlation_parameters, n_samples)
N_denitrification, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.N_max_denitrification_drying_bed, correlation_distributions, correlation_parameters, n_samples)
final_solids_content, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.final_solids_content_drying_bed, correlation_distributions, correlation_parameters, n_samples)
# calculations
COD_influent = energy/14/1000
    # first-order decay of the degradable COD pool over this stage's retention time
t0 = previous_storage_time + additional_storage
t = retention_time / 365
COD_loss = first_order_decay(rate_constant, t0, t,
max_decay=COD_degradation/100,
tot=COD_influent)
COD_reduction = COD_loss/COD_influent
CH4_emission = COD_loss*(MCF/100)*maximum_methane_emission
CH4eq = CH4_emission*CH4_GWP
COD_effluent = COD_influent - COD_loss
energy = COD_effluent*14*1000
mass = mass - mass_dry*COD_reduction
mass_dry = mass_dry - mass_dry*COD_reduction
    # first-order decay of the degradable N pool over the same window
N_loss = first_order_decay(rate_constant, t0, t,
max_decay=N_denitrification/100,
tot=N_total)
N2O_emission = N_loss*(N2O_EF/100)*(44/28)
    N2O_emission = np.minimum(N2O_emission, N_loss*(44/28))  # cap N2O-N at the N actually lost
    N2Oeq = N2O_emission*N2O_GWP
    N_total = N_total - N_loss
    N_ammonia = np.maximum(N_ammonia - N_loss, 0)  # ammonia pool cannot go negative
    # cap the wet mass so the cake leaves with at least the final solids content
    mass = np.where(final_solids_content > (mass_dry/mass)*100,
                    mass_dry/(final_solids_content/100), mass)
# construction costs and emissions
number_storage, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.number_storage_beds, correlation_distributions, correlation_parameters, n_samples)
number_covered, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.number_covered_drying_beds, correlation_distributions, correlation_parameters, n_samples)
number_uncovered, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.number_uncovered_drying_beds, correlation_distributions, correlation_parameters, n_samples)
height_storage, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.storage_wall_height, correlation_distributions, correlation_parameters, n_samples)
width_covered, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.covered_bed_width, correlation_distributions, correlation_parameters, n_samples)
length_covered, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.covered_bed_length, correlation_distributions, correlation_parameters, n_samples)
height_drying, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.drying_bed_wall_height, correlation_distributions, correlation_parameters, n_samples)
width_uncovered, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.uncovered_bed_width, correlation_distributions, correlation_parameters, n_samples)
length_uncovered, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.uncovered_bed_length, correlation_distributions, correlation_parameters, n_samples)
column_mass_per_meter, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.column_mass_per_meter, correlation_distributions, correlation_parameters, n_samples)
covered_columns_per_side, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.covered_columns_per_side, correlation_distributions, correlation_parameters, n_samples)
covered_column_height, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.covered_column_height, correlation_distributions, correlation_parameters, n_samples)
concrete_volume_covered = number_covered*(concrete_thickness*(length_covered*width_covered + 2*length_covered*height_drying + 2*width_covered*height_drying))
concrete_volume_uncovered = number_uncovered*(concrete_thickness*(length_uncovered*width_uncovered + 2*length_uncovered*height_drying + 2*width_uncovered*height_drying))
concrete_volume_storage = number_storage*(concrete_thickness*(length_covered*width_covered + 2*length_covered*height_storage + 2*width_covered*height_storage))
concrete_volume = concrete_volume_covered + concrete_volume_uncovered + concrete_volume_storage
concrete_emissions = concrete_IF_GHG*concrete_volume
roof_area = np.full(np.shape(roof_slope), np.nan)
for i in range(0,len(roof_slope)):
roof_area[i] = ((number_covered[i]+number_storage[i])*length_covered[i]*width_covered[i])/math.cos(roof_slope[i]*math.pi/180)
roof_mass_steel = roof_area*roof_mass
column_mass = (covered_column_height*column_mass_per_meter)*(covered_columns_per_side*2)*(number_storage+number_covered)
steel_emissions = steel_IF_GHG*(roof_mass_steel+column_mass)
construction_emissions_annual = (concrete_emissions+steel_emissions)/plant_lifetime/existing_population
tech_construction_emissions[:,3:4] = tech_construction_emissions[:,3:4] + construction_emissions_annual
# cost
if parameters.use_total_price.expected == 'no':
concrete_cost, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.concrete_cost, correlation_distributions, correlation_parameters, n_samples)
steel_cost, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.steel_cost, correlation_distributions, correlation_parameters, n_samples)
cap_cost = (concrete_volume)*concrete_cost + (roof_mass_steel+column_mass)*steel_cost
opex_percent, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.existing_plant_opex, correlation_distributions, correlation_parameters, n_samples)
annual_opex = (cap_cost*(opex_percent/100)) / existing_population
capex_annualized = (cap_cost * ((discount_rate*(1 + discount_rate)**plant_lifetime)/(((1 + discount_rate)**plant_lifetime) - 1))) / existing_population
construction_cost[:,3:4] = construction_cost[:,3:4] + capex_annualized
operating_cost[:,3:4] = operating_cost[:,3:4] + annual_opex
electricity_cost, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.electricity_cost, correlation_distributions, correlation_parameters, n_samples)
electricity_GHG, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.electricity_GHG, correlation_distributions, correlation_parameters, n_samples)
# assume all operating expenses come from electricity
annual_kWh = annual_opex / electricity_cost
operating_emissions = (annual_kWh * electricity_GHG)
tech_operating_emissions[:,3:4] = tech_operating_emissions[:,3:4] + operating_emissions
outputs[:,0:9] = np.concatenate((mass, mass_dry, N_total, P_total, K_total,
Mg_total, Ca_total, energy, N_ammonia), 1)
direct_emissions[:,3:4] = direct_emissions[:,3:4] + CH4eq + N2Oeq
additional_storage = additional_storage + retention_time/365
return outputs, construction_cost, operating_cost, direct_emissions, tech_construction_emissions, tech_operating_emissions, correlation_distributions, correlation_parameters, additional_storage
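# The capital cost annualization above is the standard capital recovery
# factor, CRF = r*(1+r)**n / ((1+r)**n - 1). A quick sanity check with
# illustrative numbers (r = 5 % discount rate, n = 20-year lifetime):
#
#     r, n = 0.05, 20
#     crf = (r*(1 + r)**n)/((1 + r)**n - 1)   # ~= 0.0802
#
# so each 1,000 (currency units) of capital costs ~80.2 per year before
# dividing by the population served.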
#%% drying beds alternate function
def drying_beds_alt(inputs, construction_cost, operating_cost, direct_emissions, tech_construction_emissions, tech_operating_emissions, parameters, correlation_distributions, correlation_parameters, n_samples, rate_constant, maximum_methane_emission, CH4_GWP, N2O_GWP, previous_storage_time, additional_storage, concrete_thickness, roof_slope, roof_mass, concrete_IF_GHG, steel_IF_GHG, plant_lifetime, existing_population, discount_rate):
outputs = copy.deepcopy(inputs)
mass = np.reshape(inputs[:,0], (-1,1))
mass_dry = np.reshape(inputs[:,1], (-1,1))
N_total = np.reshape(inputs[:,2], (-1,1))
P_total = np.reshape(inputs[:,3], (-1,1))
K_total = np.reshape(inputs[:,4], (-1,1))
Mg_total = np.reshape(inputs[:,5], (-1,1))
Ca_total = np.reshape(inputs[:,6], (-1,1))
energy = np.reshape(inputs[:,7], (-1,1))
N_ammonia = np.reshape(inputs[:,8], (-1,1))
# parameters
COD_degradation, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.COD_degradation_drying_bed_alt, correlation_distributions, correlation_parameters, n_samples)
retention_time, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.retention_time_drying_bed_alt, correlation_distributions, correlation_parameters, n_samples)
MCF, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.MCF_drying_bed_alt, correlation_distributions, correlation_parameters, n_samples)
N2O_EF, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.N2O_EF_drying_bed_alt, correlation_distributions, correlation_parameters, n_samples)
N_denitrification, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.N_max_denitrification_drying_bed_alt, correlation_distributions, correlation_parameters, n_samples)
final_solids_content, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.final_solids_content_drying_bed_alt, correlation_distributions, correlation_parameters, n_samples)
# calculations
COD_influent = energy/14/1000
    # first-order decay of the degradable COD pool over this stage's retention time
t0 = previous_storage_time + additional_storage
t = retention_time / 365
COD_loss = first_order_decay(rate_constant, t0, t,
max_decay=COD_degradation/100,
tot=COD_influent)
COD_reduction = COD_loss/COD_influent
CH4_emission = COD_loss*(MCF/100)*maximum_methane_emission
CH4eq = CH4_emission*CH4_GWP
COD_effluent = COD_influent - COD_loss
energy = COD_effluent*14*1000
mass = mass - mass_dry*COD_reduction
mass_dry = mass_dry - mass_dry*COD_reduction
    # first-order decay of the degradable N pool (same formulation as the other modules)
    N_loss = first_order_decay(rate_constant, t0, t,
                               max_decay=N_denitrification/100,
                               tot=N_total)
    N2O_emission = N_loss*(N2O_EF/100)*(44/28)
    N2O_emission = np.minimum(N2O_emission, N_loss*(44/28))  # cap N2O-N at the N actually lost
    N2Oeq = N2O_emission*N2O_GWP
    N_total = N_total - N_loss
    N_ammonia = np.maximum(N_ammonia - N_loss, 0)  # ammonia pool cannot go negative
    # cap the wet mass so the cake leaves with at least the final solids content
    mass = np.where(final_solids_content > (mass_dry/mass)*100,
                    mass_dry/(final_solids_content/100), mass)
# construction costs and emissions
number_unplanted, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.number_unplanted_beds_alt, correlation_distributions, correlation_parameters, n_samples)
unplanted_height, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.unplanted_bed_wall_height_alt, correlation_distributions, correlation_parameters, n_samples)
unplanted_width, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.unplanted_bed_width_alt, correlation_distributions, correlation_parameters, n_samples)
unplanted_length, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.unplanted_bed_length_alt, correlation_distributions, correlation_parameters, n_samples)
number_planted, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.number_planted_beds_alt, correlation_distributions, correlation_parameters, n_samples)
planted_height, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.planted_bed_wall_height_alt, correlation_distributions, correlation_parameters, n_samples)
planted_width, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.planted_bed_width_alt, correlation_distributions, correlation_parameters, n_samples)
planted_length, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.planted_bed_length_alt, correlation_distributions, correlation_parameters, n_samples)
concrete_volume_unplanted = number_unplanted*(concrete_thickness*(unplanted_length*unplanted_width + 2*unplanted_length*unplanted_height + 2*unplanted_width*unplanted_height))
concrete_volume_planted = number_planted*(concrete_thickness*(planted_length*planted_width + 2*planted_length*planted_height + 2*planted_width*planted_height))
concrete_volume = concrete_volume_unplanted + concrete_volume_planted
concrete_emissions = concrete_IF_GHG*concrete_volume
construction_emissions_annual = (concrete_emissions)/plant_lifetime/existing_population
tech_construction_emissions[:,3:4] = tech_construction_emissions[:,3:4] + construction_emissions_annual
# cost
if parameters.use_total_price.expected == 'no':
concrete_cost, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.concrete_cost, correlation_distributions, correlation_parameters, n_samples)
cap_cost = (concrete_volume)*concrete_cost
opex_percent, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.existing_plant_opex, correlation_distributions, correlation_parameters, n_samples)
annual_opex = (cap_cost*(opex_percent/100)) / existing_population
capex_annualized = (cap_cost * ((discount_rate*(1 + discount_rate)**plant_lifetime)/(((1 + discount_rate)**plant_lifetime) - 1))) / existing_population
construction_cost[:,3:4] = construction_cost[:,3:4] + capex_annualized
operating_cost[:,3:4] = operating_cost[:,3:4] + annual_opex
electricity_cost, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.electricity_cost, correlation_distributions, correlation_parameters, n_samples)
electricity_GHG, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.electricity_GHG, correlation_distributions, correlation_parameters, n_samples)
# assume all operating expenses come from electricity
annual_kWh = annual_opex / electricity_cost
operating_emissions = (annual_kWh * electricity_GHG)
tech_operating_emissions[:,3:4] = tech_operating_emissions[:,3:4] + operating_emissions
outputs[:,0:9] = np.concatenate((mass, mass_dry, N_total, P_total, K_total,
Mg_total, Ca_total, energy, N_ammonia), 1)
direct_emissions[:,3:4] = direct_emissions[:,3:4] + CH4eq + N2Oeq
additional_storage = additional_storage + retention_time/365
return outputs, construction_cost, operating_cost, direct_emissions, tech_construction_emissions, tech_operating_emissions, correlation_distributions, correlation_parameters, additional_storage
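# Worked example of the final-solids-content cap applied in both drying
# bed functions (illustrative numbers): a bed discharging 100 kg/yr of
# dry solids at a 25 % final solids content cannot leave with more than
#     mass = 100 / (25/100) = 400 kg/yr
# of wet cake, so any larger wet mass is reset to that value.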
#%% anaerobic baffled reactor function
def ABR(inputs, construction_cost, operating_cost, direct_emissions, tech_construction_emissions, tech_operating_emissions, biogas, parameters, correlation_distributions, correlation_parameters, n_samples, rate_constant, maximum_methane_emission, CH4_GWP, N2O_GWP, CH4_energy, previous_storage_time, additional_storage, sludge_flow_alt, concrete_thickness, concrete_IF_GHG, gravel_IF_GHG, gravel_bulk_density, excavation_IF_GHG, plant_lifetime, sludge_pop_alt, discount_rate):
outputs = copy.deepcopy(inputs)
mass = np.reshape(inputs[:,0], (-1,1))
mass_dry = np.reshape(inputs[:,1], (-1,1))
N_total = np.reshape(inputs[:,2], (-1,1))
P_total = np.reshape(inputs[:,3], (-1,1))
K_total = np.reshape(inputs[:,4], (-1,1))
Mg_total = np.reshape(inputs[:,5], (-1,1))
Ca_total = np.reshape(inputs[:,6], (-1,1))
energy = np.reshape(inputs[:,7], (-1,1))
N_ammonia = np.reshape(inputs[:,8], (-1,1))
# biogas production
# parameters
MCF, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.MCF_ABR, correlation_distributions, correlation_parameters, n_samples)
COD_removal, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.COD_removal_ABR, correlation_distributions, correlation_parameters, n_samples)
residence_time, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.retention_time_ABR, correlation_distributions, correlation_parameters, n_samples)
# calculations
# carbon (kg COD/yr; assume 14 kJ/g COD in wastewater)
COD_total = (energy/14/1000)
COD_degrade = COD_total*(COD_removal/100)
CH4_production = COD_degrade*(MCF/100)*maximum_methane_emission
COD_total = COD_total - COD_degrade
energy = COD_total*14*1000
    if parameters.CH4_captured_ABR.expected == 'yes':
        CH4eq = np.zeros((n_samples, 1))  # captured gas is not emitted
    else:
        CH4eq = CH4_production*CH4_GWP
        CH4_production = np.zeros((n_samples, 1))  # vented, so none is recovered as biogas
# nitrogen emissions (kg N/yr; N2O expressed as kg N2O/yr)
N_removal, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.N_removal_ABR, correlation_distributions, correlation_parameters, n_samples)
N_removed = N_total * (N_removal/100)
N_total = N_total - N_removed
    N_ammonia = np.maximum(N_ammonia - N_removed, 0)  # ammonia pool cannot go negative
if parameters.N_emission_in_biogas_ABR.expected == 'yes':
N2O_EF, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.N2O_EF_ABR, correlation_distributions, correlation_parameters, n_samples)
N_denitrification, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.N_max_denitrification_ABR, correlation_distributions, correlation_parameters, n_samples)
N_degradable = N_removed * (N_denitrification/100)
        N2O_emission = np.minimum(N_removed*(N2O_EF/100)*(44/28),
                                  N_degradable*(44/28))  # cap N2O-N at the degradable N
N2Oeq = N2O_emission*N2O_GWP
else:
        N2Oeq = np.zeros((n_samples, 1))
# solids loss (based on COD loss)
mass = mass - mass_dry * (COD_removal/100)
mass_dry = mass_dry - mass_dry * (COD_removal/100)
# energy in biogas (kJ/yr)
biogas_new = CH4_production / 16 * CH4_energy * 1000
biogas = biogas + biogas_new
outputs[:,0:9] = np.concatenate((mass, mass_dry, N_total, P_total, K_total,
Mg_total, Ca_total, energy, N_ammonia), 1)
direct_emissions[:,3:4] = direct_emissions[:,3:4] + CH4eq + N2Oeq
# construction costs and emissions
number, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.number_ABR, correlation_distributions, correlation_parameters, n_samples)
length, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.length_ABR, correlation_distributions, correlation_parameters, n_samples)
width, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.width_ABR, correlation_distributions, correlation_parameters, n_samples)
height, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.height_ABR, correlation_distributions, correlation_parameters, n_samples)
baffles, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.baffles_ABR, correlation_distributions, correlation_parameters, n_samples)
add_concrete, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.additional_concrete_ABR, correlation_distributions, correlation_parameters, n_samples)
volume = length*width*height
    # additional_concrete_ABR is a percentage, so divide by 100
    concrete_volume = number*concrete_thickness*(2*length*width+2*length*height+(2+baffles)*width*height)*(add_concrete/100)
concrete_emissions = concrete_volume * concrete_IF_GHG
gravel_volume = number*length*width*height/(baffles+1)
gravel_mass = gravel_volume * gravel_bulk_density
gravel_emissions = gravel_mass * gravel_IF_GHG
excavation_volume = number*volume
excavation_emissions = excavation_volume * excavation_IF_GHG
construction_emissions_annual = (concrete_emissions+gravel_emissions+excavation_emissions)/plant_lifetime/sludge_pop_alt
tech_construction_emissions[:,3:4] = tech_construction_emissions[:,3:4] + construction_emissions_annual
# cost
if parameters.use_total_price.expected == 'no':
concrete_cost, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.concrete_cost, correlation_distributions, correlation_parameters, n_samples)
steel_cost, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.steel_cost, correlation_distributions, correlation_parameters, n_samples)
cap_cost = (concrete_volume)*concrete_cost
opex_percent, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.alternative_plant_opex, correlation_distributions, correlation_parameters, n_samples)
annual_opex = (cap_cost*(opex_percent/100)) / sludge_pop_alt
capex_annualized = (cap_cost * ((discount_rate*(1 + discount_rate)**plant_lifetime)/(((1 + discount_rate)**plant_lifetime) - 1))) / sludge_pop_alt
construction_cost[:,3:4] = construction_cost[:,3:4] + capex_annualized
operating_cost[:,3:4] = operating_cost[:,3:4] + annual_opex
electricity_cost, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.electricity_cost, correlation_distributions, correlation_parameters, n_samples)
electricity_GHG, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.electricity_GHG, correlation_distributions, correlation_parameters, n_samples)
# assume all operating expenses come from electricity
annual_kWh = annual_opex / electricity_cost
operating_emissions = (annual_kWh * electricity_GHG)
tech_operating_emissions[:,3:4] = tech_operating_emissions[:,3:4] + operating_emissions
additional_storage = additional_storage + residence_time/365
return outputs, construction_cost, operating_cost, direct_emissions, tech_construction_emissions, tech_operating_emissions, biogas, correlation_distributions, correlation_parameters, additional_storage
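# Unit bookkeeping for the biogas energy above (an inference from the
# conversion factors, not stated explicitly in the source): with
# CH4_production in kg/yr and CH4_energy in kJ/mol,
#     CH4_production/16*1000  ->  mol CH4/yr   (molar mass of CH4 = 16 g/mol)
# so biogas_new = mol/yr * kJ/mol = kJ/yr, matching the comment above.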
#%% secondary liquid bed function
def secondary_liquid_bed(inputs, construction_cost, operating_cost, direct_emissions, tech_construction_emissions, tech_operating_emissions, parameters, correlation_distributions, correlation_parameters, n_samples, rate_constant, maximum_methane_emission, CH4_GWP, N2O_GWP, previous_storage_time, additional_storage, concrete_thickness, roof_slope, roof_mass, concrete_IF_GHG, steel_IF_GHG, plant_lifetime, existing_population, discount_rate):
outputs = copy.deepcopy(inputs)
mass = np.reshape(inputs[:,0], (-1,1))
mass_dry = np.reshape(inputs[:,1], (-1,1))
N_total = np.reshape(inputs[:,2], (-1,1))
P_total = np.reshape(inputs[:,3], (-1,1))
K_total = np.reshape(inputs[:,4], (-1,1))
Mg_total = np.reshape(inputs[:,5], (-1,1))
Ca_total = np.reshape(inputs[:,6], (-1,1))
energy = np.reshape(inputs[:,7], (-1,1))
N_ammonia = np.reshape(inputs[:,8], (-1,1))
# parameters
COD_degradation, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.COD_degradation_sec, correlation_distributions, correlation_parameters, n_samples)
retention_time, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.retention_time_sec, correlation_distributions, correlation_parameters, n_samples)
MCF, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.MCF_sec, correlation_distributions, correlation_parameters, n_samples)
# calculations
COD_influent = energy/14/1000
    # first-order decay of the degradable COD pool over this stage's retention time
t0 = previous_storage_time + additional_storage
t = retention_time / 365
COD_loss = first_order_decay(rate_constant, t0, t,
max_decay=COD_degradation/100,
tot=COD_influent)
COD_reduction = COD_loss/COD_influent
CH4_emission = COD_loss*(MCF/100)*maximum_methane_emission
CH4eq = CH4_emission*CH4_GWP
COD_effluent = COD_influent - COD_loss
energy = COD_effluent*14*1000
mass = mass - mass_dry*COD_reduction
mass_dry = mass_dry - mass_dry*COD_reduction
    if parameters.N_emission_from_anaerobic_lagoon.expected == 'yes':  # shares the anaerobic lagoon N-emission switch
N2O_EF, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.N2O_EF_sec, correlation_distributions, correlation_parameters, n_samples)
N_denitrification, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.N_max_denitrification_sec, correlation_distributions, correlation_parameters, n_samples)
        # first-order decay of the degradable N pool (t0 and t as computed above)
N_loss = first_order_decay(rate_constant, t0, t,
max_decay=N_denitrification/100,
tot=N_total)
N2O_emission = N_loss*(N2O_EF/100)*(44/28)
        N2O_emission = np.minimum(N2O_emission, N_loss*(44/28))  # cap N2O-N at the N actually lost
        N2Oeq = N2O_emission*N2O_GWP
        N_total = N_total - N_loss
        N_ammonia = np.maximum(N_ammonia - N_loss, 0)  # ammonia pool cannot go negative
else:
        N2Oeq = np.zeros((n_samples, 1))
# construction costs and emissions
number, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.number_sec, correlation_distributions, correlation_parameters, n_samples)
height, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.height_sec, correlation_distributions, correlation_parameters, n_samples)
width, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.width_sec, correlation_distributions, correlation_parameters, n_samples)
length, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.length_sec, correlation_distributions, correlation_parameters, n_samples)
concrete_volume = number*(concrete_thickness*(length*width + 2*length*height + 2*width*height))
concrete_emissions = concrete_IF_GHG*concrete_volume
construction_emissions_annual = (concrete_emissions)/plant_lifetime/existing_population
tech_construction_emissions[:,3:4] = tech_construction_emissions[:,3:4] + construction_emissions_annual
# cost
if parameters.use_total_price.expected == 'no':
concrete_cost, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.concrete_cost, correlation_distributions, correlation_parameters, n_samples)
cap_cost = (concrete_volume)*concrete_cost
opex_percent, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.existing_plant_opex, correlation_distributions, correlation_parameters, n_samples)
annual_opex = (cap_cost*(opex_percent/100)) / existing_population
capex_annualized = (cap_cost * ((discount_rate*(1 + discount_rate)**plant_lifetime)/(((1 + discount_rate)**plant_lifetime) - 1))) / existing_population
construction_cost[:,3:4] = construction_cost[:,3:4] + capex_annualized
operating_cost[:,3:4] = operating_cost[:,3:4] + annual_opex
electricity_cost, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.electricity_cost, correlation_distributions, correlation_parameters, n_samples)
electricity_GHG, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.electricity_GHG, correlation_distributions, correlation_parameters, n_samples)
# assume all operating expenses come from electricity
annual_kWh = annual_opex / electricity_cost
operating_emissions = (annual_kWh * electricity_GHG)
tech_operating_emissions[:,3:4] = tech_operating_emissions[:,3:4] + operating_emissions
outputs[:,0:9] = np.concatenate((mass, mass_dry, N_total, P_total, K_total,
Mg_total, Ca_total, energy, N_ammonia), 1)
direct_emissions[:,3:4] = direct_emissions[:,3:4] + CH4eq + N2Oeq
additional_storage = additional_storage + retention_time/365
return outputs, construction_cost, operating_cost, direct_emissions, tech_construction_emissions, tech_operating_emissions, correlation_distributions, correlation_parameters, additional_storage
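# --- illustration only ---------------------------------------------------
# Every uncertain parameter in this module is drawn with the same call
# pattern, which threads the correlation bookkeeping through each module:
#
#     value, correlation_distributions, correlation_parameters = \
#         lhs.lhs_distribution(parameters.some_parameter,
#                              correlation_distributions,
#                              correlation_parameters, n_samples)
#
# i.e. lhs_distribution returns an (n_samples, 1) Latin hypercube sample
# and the updated correlation lists, which are passed on to the next
# draw. (The sample shape and bookkeeping behavior are inferred from how
# the results are used here, not from the lhs module itself.)
# ---------------------------------------------------------------------------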
#%% main function
def main(input_excel_name, excreta_inputs, liquid_inputs, solid_inputs, direct_emissions, tech_construction_emissions, tech_operating_emissions, construction_cost, operating_cost, correlation_distributions, correlation_parameters, n_samples, rate_constant, maximum_methane_emission, CH4_GWP, N2O_GWP, previous_storage_time, discount_rate, exchange_rate):
# import module parameters from input spreadsheet
parameters = pd.DataFrame.transpose(pd.read_excel(input_excel_name, sheet_name = 'treatment').set_index('parameters'))
CH4_energy, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.CH4_energy, correlation_distributions, correlation_parameters, n_samples)
sewer_flow_rate, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.sewer_flow_existing, correlation_distributions, correlation_parameters, n_samples)
sludge_flow_rate, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.sludge_flow_existing, correlation_distributions, correlation_parameters, n_samples)
sludge_flow_alt, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.sludge_flow_alternative, correlation_distributions, correlation_parameters, n_samples)
concrete_thickness, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.concrete_thickness, correlation_distributions, correlation_parameters, n_samples)
roof_slope, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.roof_slope, correlation_distributions, correlation_parameters, n_samples)
roof_mass, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.roof_mass, correlation_distributions, correlation_parameters, n_samples)
concrete_IF_GHG, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.concrete_IF_GHG, correlation_distributions, correlation_parameters, n_samples)
stainless_steel_IF_GHG, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.stainless_steel_IF_GHG, correlation_distributions, correlation_parameters, n_samples)
stainless_steel_sheet_IF_GHG, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.stainless_steel_sheet_IF_GHG, correlation_distributions, correlation_parameters, n_samples)
excavation_IF_GHG, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.excavation_IF_GHG, correlation_distributions, correlation_parameters, n_samples)
liner_mass, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.liner_mass, correlation_distributions, correlation_parameters, n_samples)
liner_IF_GHG, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.liner_IF_GHG, correlation_distributions, correlation_parameters, n_samples)
gravel_IF_GHG, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.gravel_IF_GHG, correlation_distributions, correlation_parameters, n_samples)
gravel_bulk_density, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.gravel_bulk_density, correlation_distributions, correlation_parameters, n_samples)
existing_plant_lifetime, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.existing_plant_lifetime, correlation_distributions, correlation_parameters, n_samples)
alternative_plant_lifetime, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.alternative_plant_lifetime, correlation_distributions, correlation_parameters, n_samples)
existing_sewer_population, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.existing_sewer_population_served, correlation_distributions, correlation_parameters, n_samples)
existing_sludge_population, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.existing_sludge_population_served, correlation_distributions, correlation_parameters, n_samples)
sludge_pop_alt, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.sludge_population_alternative, correlation_distributions, correlation_parameters, n_samples)
existing_population = existing_sewer_population + existing_sludge_population
steel_IF_GHG = stainless_steel_IF_GHG + stainless_steel_sheet_IF_GHG
flow_rate = sewer_flow_rate + sludge_flow_rate
additional_storage_excreta = 0
additional_storage_liquid = 0
additional_storage_solid = 0
# define the module(s)
excreta_module = [parameters.mixed_excreta_module_1.expected, parameters.mixed_excreta_module_2.expected, parameters.mixed_excreta_module_3.expected]
liquid_module = [parameters.liquid_module_1.expected, parameters.liquid_module_2.expected, parameters.liquid_module_3.expected]
solid_module = [parameters.solid_module_1.expected, parameters.solid_module_2.expected, parameters.solid_module_3.expected]
# create temporary variables to track excreta, solids, and liquids
excreta_temp = copy.deepcopy(excreta_inputs)
liquid_temp = copy.deepcopy(liquid_inputs)
solid_temp = copy.deepcopy(solid_inputs)
    biogas = np.zeros((len(excreta_temp), 1))
for i in range(0, len(excreta_module)):
        if (type(excreta_module[i]) is float) and (type(liquid_module[i]) is float) and (type(solid_module[i]) is float):
            # a NaN entry marks an unused slot; any other numeric value is invalid
if (not np.isnan(excreta_module[i])):
raise ValueError('The specified excreta treatment module is not valid.')
if (not np.isnan(liquid_module[i])):
raise ValueError('The specified liquid treatment module is not valid.')
if (not np.isnan(solid_module[i])):
raise ValueError('The specified solid treatment module is not valid.')
# otherwise, are both mixed and split stream options entered?
elif (type(excreta_module[i]) is str) and ((type(liquid_module[i]) is str) or (type(solid_module[i]) is str)):
raise ValueError('Modules for both the mixed and separated cases should not be evaluated simultaneously.')
# check mixed stream options first
if type(excreta_module[i]) is str:
# anaerobic baffled reactor module
if excreta_module[i] == 'ABR':
(excreta_temp, construction_cost, operating_cost, direct_emissions, tech_construction_emissions, tech_operating_emissions, biogas, correlation_distributions,
correlation_parameters, additional_storage_liquid) = ABR(excreta_temp, construction_cost, operating_cost, direct_emissions, tech_construction_emissions, tech_operating_emissions, biogas, parameters,
correlation_distributions, correlation_parameters, n_samples, rate_constant, maximum_methane_emission, CH4_GWP, N2O_GWP, CH4_energy, previous_storage_time, additional_storage_liquid, sludge_flow_alt, concrete_thickness, concrete_IF_GHG, gravel_IF_GHG, gravel_bulk_density, excavation_IF_GHG, alternative_plant_lifetime, sludge_pop_alt, discount_rate)
# sedimentation module (mixed input, separate outputs)
elif excreta_module[i] == 'sedimentation':
(liquid_temp, solid_temp, construction_cost, operating_cost, direct_emissions, tech_construction_emissions, tech_operating_emissions, correlation_distributions,
correlation_parameters, additional_storage_liquid, additional_storage_solid) = sedimentation(excreta_temp, construction_cost, operating_cost, direct_emissions, tech_construction_emissions, tech_operating_emissions, parameters,
correlation_distributions, correlation_parameters, n_samples, rate_constant, maximum_methane_emission, CH4_GWP, N2O_GWP, previous_storage_time, additional_storage_excreta, concrete_thickness, roof_slope, roof_mass, concrete_IF_GHG, steel_IF_GHG, existing_plant_lifetime, existing_sludge_population, discount_rate)
excreta_temp = np.full(np.shape(excreta_temp), np.nan)
# sludge separator module (mixed input, separate outputs)
elif excreta_module[i] == 'sludge_separator':
(liquid_temp, solid_temp, direct_emissions, correlation_distributions,
correlation_parameters, additional_storage_liquid, additional_storage_solid) = sludge_separator(excreta_temp, direct_emissions, parameters,
correlation_distributions, correlation_parameters, n_samples, rate_constant, maximum_methane_emission, CH4_GWP, N2O_GWP, previous_storage_time, additional_storage_excreta)
excreta_temp = np.full(np.shape(excreta_temp), np.nan)
# if the excreta module input is not supported/valid
else:
raise ValueError('The treatment module specified for excreta is not valid.')
# check liquid stream
if (type(liquid_module[i]) is str):
# anaerobic lagoon module
if liquid_module[i] == 'anaerobic_lagoon':
(liquid_temp, direct_emissions, tech_construction_emissions, tech_operating_emissions, correlation_distributions,
correlation_parameters, additional_storage_liquid) = anaerobic_lagoon(liquid_temp, direct_emissions, tech_construction_emissions, tech_operating_emissions, parameters,
correlation_distributions, correlation_parameters, n_samples, rate_constant, maximum_methane_emission, CH4_GWP, N2O_GWP, previous_storage_time, additional_storage_liquid, flow_rate, excavation_IF_GHG, liner_mass, liner_IF_GHG, existing_plant_lifetime, existing_population)
elif liquid_module[i] == 'facultative_lagoon':
(liquid_temp, direct_emissions, tech_construction_emissions, tech_operating_emissions, correlation_distributions,
correlation_parameters, additional_storage_liquid) = facultative_lagoon(liquid_temp, direct_emissions, tech_construction_emissions, tech_operating_emissions, parameters,
correlation_distributions, correlation_parameters, n_samples, rate_constant, maximum_methane_emission, CH4_GWP, N2O_GWP, previous_storage_time, additional_storage_liquid, flow_rate, excavation_IF_GHG, liner_mass, liner_IF_GHG, existing_plant_lifetime, existing_population)
elif liquid_module[i] == 'secondary_liquid_bed':
(liquid_temp, construction_cost, operating_cost, direct_emissions, tech_construction_emissions, tech_operating_emissions, correlation_distributions,
correlation_parameters, additional_storage_solid) = secondary_liquid_bed(liquid_temp, construction_cost, operating_cost, direct_emissions, tech_construction_emissions, tech_operating_emissions, parameters,
correlation_distributions, correlation_parameters, n_samples, rate_constant, maximum_methane_emission, CH4_GWP, N2O_GWP, previous_storage_time, additional_storage_solid, concrete_thickness, roof_slope, roof_mass, concrete_IF_GHG, steel_IF_GHG, existing_plant_lifetime, existing_sludge_population, discount_rate)
# if the liquid module input is not supported/valid
else:
raise ValueError('The treatment module specified for liquid is not valid.')
# check solid stream
if (type(solid_module[i]) is str):
# unplanted drying bed module
if solid_module[i] == 'unplanted_drying_bed':
(solid_temp, construction_cost, operating_cost, direct_emissions, tech_construction_emissions, tech_operating_emissions, correlation_distributions,
correlation_parameters, additional_storage_solid) = unplanted_drying_bed(solid_temp, construction_cost, operating_cost, direct_emissions, tech_construction_emissions, tech_operating_emissions, parameters,
correlation_distributions, correlation_parameters, n_samples, rate_constant, maximum_methane_emission, CH4_GWP, N2O_GWP, previous_storage_time, additional_storage_solid, concrete_thickness, roof_slope, roof_mass, concrete_IF_GHG, steel_IF_GHG, existing_plant_lifetime, existing_sludge_population, discount_rate)
# drying beds alternate module
elif solid_module[i] == 'drying_beds_alt':
(solid_temp, construction_cost, operating_cost, direct_emissions, tech_construction_emissions, tech_operating_emissions, correlation_distributions,
correlation_parameters, additional_storage_solid) = drying_beds_alt(solid_temp, construction_cost, operating_cost, direct_emissions, tech_construction_emissions, tech_operating_emissions, parameters,
correlation_distributions, correlation_parameters, n_samples, rate_constant, maximum_methane_emission, CH4_GWP, N2O_GWP, previous_storage_time, additional_storage_solid, concrete_thickness, roof_slope, roof_mass, concrete_IF_GHG, steel_IF_GHG, existing_plant_lifetime, existing_sludge_population, discount_rate)
# if the solid module input is not supported/valid
else:
raise ValueError('The treatment module specified for solid is not valid.')
# after iteration, set outputs equal to current values of temporary variables
excreta_outputs = excreta_temp
liquid_outputs = liquid_temp
solid_outputs = solid_temp
electricity_cost, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.electricity_cost, correlation_distributions, correlation_parameters, n_samples)
electricity_GHG, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.electricity_GHG, correlation_distributions, correlation_parameters, n_samples)
if parameters.use_total_price.expected == 'yes':
if parameters.use_existing_plant.expected == 'yes':
cap_cost, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.existing_plant_capex, correlation_distributions, correlation_parameters, n_samples)
annual_electricity, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.existing_plant_electricity, correlation_distributions, correlation_parameters, n_samples)
staff, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.existing_plant_staff, correlation_distributions, correlation_parameters, n_samples)
monthly_salary, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.existing_plant_salary, correlation_distributions, correlation_parameters, n_samples)
annual_labor = staff * (monthly_salary/exchange_rate) * 12
annual_opex = (annual_electricity*electricity_cost + annual_labor) / existing_population
capex_annualized = (cap_cost * ((discount_rate*(1 + discount_rate)**existing_plant_lifetime)/(((1 + discount_rate)**existing_plant_lifetime) - 1))) / existing_population
operating_emissions = (annual_electricity * electricity_GHG) / existing_population
elif parameters.use_existing_plant.expected == 'no':
cap_cost, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.alternative_plant_capex, correlation_distributions, correlation_parameters, n_samples)
annual_electricity, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.alternative_plant_electricity, correlation_distributions, correlation_parameters, n_samples)
skilled_staff, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.alternative_plant_skilled_staff, correlation_distributions, correlation_parameters, n_samples)
unskilled_staff, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.alternative_plant_unskilled_staff, correlation_distributions, correlation_parameters, n_samples)
skilled_monthly_salary, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.alternative_plant_skilled_salary, correlation_distributions, correlation_parameters, n_samples)
unskilled_monthly_salary, correlation_distributions, correlation_parameters = lhs.lhs_distribution(parameters.alternative_plant_unskilled_salary, correlation_distributions, correlation_parameters, n_samples)
annual_labor = (skilled_staff*(skilled_monthly_salary/exchange_rate) + unskilled_staff*(unskilled_monthly_salary/exchange_rate)) * 12
            annual_opex = (annual_electricity*electricity_cost + annual_labor) / sludge_pop_alt
            capex_annualized = (cap_cost * ((discount_rate*(1 + discount_rate)**alternative_plant_lifetime)/(((1 + discount_rate)**alternative_plant_lifetime) - 1))) / sludge_pop_alt
            operating_emissions = (annual_electricity * electricity_GHG) / sludge_pop_alt
        else:
            raise ValueError('The use_existing_plant parameter must be yes or no.')
construction_cost[:,3:4] = construction_cost[:,3:4] + capex_annualized
operating_cost[:,3:4] = operating_cost[:,3:4] + annual_opex
        # operating emissions based on the plant's annual electricity use
tech_operating_emissions[:,3:4] = tech_operating_emissions[:,3:4] + operating_emissions
return excreta_outputs, liquid_outputs, solid_outputs, direct_emissions, tech_construction_emissions, tech_operating_emissions, construction_cost, operating_cost, biogas, correlation_distributions, correlation_parameters
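# --- illustration only ---------------------------------------------------
# A hypothetical call pattern for main(); the array shapes and constants
# below are assumptions based only on how the arguments are indexed above
# (nine mass/nutrient columns per stream, and column 3 of each cost and
# emission matrix belonging to the treatment stage):
#
#     n = 1000
#     excreta = np.zeros((n, 9))         # mass, dry mass, N, P, K, Mg, Ca, energy, NH3-N
#     liquid = np.full((n, 9), np.nan)   # empty until a separator module runs
#     solid = np.full((n, 9), np.nan)
#     costs_and_emissions = [np.zeros((n, 5)) for _ in range(5)]
#     results = main('inputs.xlsx', excreta, liquid, solid,
#                    *costs_and_emissions, [], [], n,
#                    rate_constant=0.06, maximum_methane_emission=0.25,
#                    CH4_GWP=28, N2O_GWP=265, previous_storage_time=0.5,
#                    discount_rate=0.05, exchange_rate=100)
# ---------------------------------------------------------------------------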
\xff\xbd\x8fw\xff\xec\xd5\xc5\xff\xfb\xe5\xd4\xff\xfc\xe6\xd5\
\xff\xe6\xcd\xbe\xff\xa2W(\xff\xa8X)\xff\x8d;\x0a\
\xff\xba\x8eo\xff\xe0\xcd\xbd\xff\xc8\xa1\x8b\xff\xc9\xa3\x8e\
\xff\xe1\xcf\xbf\xff\x99X*\xff\x9aD\x13\xff\xaeU\x1c\
\xff\xa3L\x15\xff\xd3\xae\x9a\xff\xfc\xe9\xda\xff\xfb\xe7\xd7\
\xff\xf4\xdd\xcd\xff\xfb\xe7\xd7\xff\xfb\xe7\xd7\xff\xf5\xe4\xd8\
\xff\xa0Q \xff`Jl\xff\x04M\xd8\xff\x07?\xc2\
\xff\x98P-\xff\xb4yW\xff\xfc\xec\xdf\xff\xfb\xe7\xd7\
\xff\xf3\xdc\xcc\xff\xfb\xe7\xd7\xff\xfb\xe7\xd6\xff\xf5\xe4\xd7\
\xff\xa6Z-\xff\xacT\x1a\xff\xb2_(\xff\x9dJ\x1a\
\xff\x9aZ,\xff\xe1\xcf\xbf\xff\xc9\xa3\x8e\xff\xcb\xa5\x91\
\xff\xe3\xd1\xc1\xff\x88;\x07\xff\xa7M\x19\xff\xb6\x5c\x1f\
\xff\xb3Y\x1d\xff\xa6V%\xff\xf5\xe5\xda\xff\xfc\xe8\xda\
\xff\xfc\xe7\xd8\xff\xfc\xe8\xd9\xff\xfd\xed\xe2\xff\xb7|Z\
\xff\xafV\x1c\xff;B\x8e\xff\x02M\xe5\xff\x05K\xd2\
\xffyQY\xff\xa7P\x19\xff\xe1\xc5\xb6\xff\xfc\xea\xdd\
\xff\xfc\xe7\xd8\xff\xfc\xe7\xd8\xff\xfd\xec\xe0\xff\xc2\x91v\
\xff\xadU\x1b\xff\xb6\x5c\x1e\xff\xb6^!\xff\xaaR\x1f\
\xff\x88;\x07\xff\xe3\xd1\xc1\xff\xcb\xa5\x91\xff\xcd\xa8\x94\
\xff\xe2\xcd\xbd\xff\x867\x01\xff\xaeU\x1f\xff\xbcd&\
\xff\xbcd&\xff\xb1[\x22\xff\xd1\xa9\x92\xff\xfc\xed\xe2\
\xff\xfc\xea\xdc\xff\xfc\xec\xdf\xff\xe9\xd3\xc4\xff\xabV\x1f\
\xff\xbac%\xff)?\xa0\xff\x02Q\xeb\xff\x04Q\xd9\
\xffeOp\xff\xb6_$\xff\xbc\x82_\xff\xfd\xef\xe5\
\xff\xfc\xea\xdc\xff\xfc\xeb\xde\xff\xf6\xe7\xdd\xff\xac]+\
\xff\xb9b%\xff\xbcd&\xff\xbcd&\xff\xafU\x1f\
\xff\x867\x01\xff\xe2\xcd\xbd\xff\xcd\xa8\x94\xff\xce\xaa\x97\
\xff\xe7\xd6\xc7\xff\x88<\x07\xff\xb2Y\x22\xff\xc3l-\
\xff\xc4m.\xff\xc0i,\xff\xb9tI\xff\xfd\xf2\xea\
\xff\xfc\xed\xe1\xff\xfd\xf0\xe6\xff\xcf\xa3\x8a\xff\xbae)\
\xff\xc4m.\xff-A\xa0\xff\x02X\xec\xff\x04V\xdd\
\xff`Py\xff\xc2l-\xff\xb0\x5c%\xff\xf6\xe9\xe0\
\xff\xfc\xee\xe2\xff\xfc\xef\xe5\xff\xd8\xb5\xa1\xff\xb7b(\
\xff\xc4m.\xff\xc4m.\xff\xc3l-\xff\xb2Y\x22\
\xff\x88<\x07\xff\xe7\xd6\xc7\xff\xce\xaa\x97\xff\xd0\xad\x9a\
\xff\xe9\xd8\xca\xff\x9c\x5c/\xff\xaaR\x1c\xff\xc8q4\
\xff\xccv7\xff\xcbu7\xff\xb7d,\xff\xfe\xf6\xf0\
\xff\xfd\xf0\xe6\xff\xfe\xf4\xec\xff\xc0\x82[\xff\xc5q4\
\xff\xccv7\xffHK\x90\xff\x03]\xe6\xff\x05V\xd8\
\xffvZq\xff\xccv7\xff\xbch/\xff\xe5\xcc\xbd\
\xff\xfd\xf1\xe8\xff\xfd\xf3\xeb\xff\xd2\xa7\x8e\xff\xc2n2\
\xff\xccv7\xff\xccv7\xff\xc8q4\xff\xaaR\x1c\
\xff\x9c\x5c/\xff\xe9\xd8\xca\xff\xd0\xad\x9a\xff\xd1\xaf\x9d\
\xff\xeb\xdb\xcd\xff\xbe\x93s\xff\x9dJ\x15\xff\xccu;\
\xff\xd5\x81D\xff\xd6\x82D\xff\xc4r:\xff\xed\xd8\xcb\
\xff\xfd\xf2\xe9\xff\xfe\xf5\xee\xff\xca\x8cd\xff\xd1}A\
\xff\xd5\x82C\xff\x81ct\xff\x05X\xd7\xff\x09M\xca\
\xff\xa8r^\xff\xd6\x82D\xff\xc7t;\xff\xe8\xcf\xbf\
\xff\xfd\xf2\xea\xff\xfd\xf4\xed\xff\xcc\x93n\xff\xcf|@\
\xff\xd6\x82D\xff\xd5\x81C\xff\xcbt:\xff\x9cH\x14\
\xff\xc0\x96x\xff\xeb\xdb\xcd\xff\xd1\xaf\x9d\xff\xd3\xb2\x9f\
\xff\xed\xde\xcf\xff\xeb\xda\xca\xff\x94I\x14\xff\xcd\x85W\
\xff\xe1\x9fn\xff\xe0\x92V\xff\xd2\x84M\xff\xec\xd2\xc1\
\xff\xfe\xf2\xe8\xff\xfe\xf4\xeb\xff\xd2\x97p\xff\xdb\x8dT\
\xff\xdf\x91U\xff\xcd\x86W\xff\x0d8\xb8\xffH_\xb6\
\xff\xe1\x98`\xff\xe0\x92W\xff\xd2\x85N\xff\xec\xd3\xc0\
\xff\xfe\xf2\xe9\xff\xfe\xf4\xea\xff\xd3\x98p\xff\xdb\x8eS\
\xff\xe0\x92W\xff\xdb\x8cQ\xff\xbdk7\xff\x8fB\x0e\
\xff\xec\xdc\xcd\xff\xed\xde\xcf\xff\xd3\xb2\x9f\xff\xd4\xb4\xa2\
\xff\xef\xe0\xd2\xff\xef\xe0\xd2\xff\xc7\x9ay\xff\xbb\x80W\
\xff\xea\xbf\xa1\xff\xea\xad}\xff\xdd\x97a\xff\xf0\xd5\xc0\
\xff\xff\xf0\xe2\xff\xff\xf1\xe4\xff\xdd\xa6}\xff\xe6\xa0h\
\xff\xe9\xa2i\xff\xe3\x97\x5c\xffcUu\xff\xac\x8b\x87\
\xff\xf0\xc2\x9f\xff\xea\xa9t\xff\xde\x98c\xff\xf0\xd5\xc0\
\xff\xff\xef\xe2\xff\xff\xf0\xe4\xff\xde\xa6~\xff\xe6\x9fh\
\xff\xe7\x9fh\xff\xda\x8eZ\xff\xabf3\xff\xcd\xa6\x89\
\xff\xef\xe0\xd2\xff\xef\xe0\xd2\xff\xd4\xb4\xa2\xff\xd5\xb6\xa5\
\xff\xf1\xe3\xd5\xff\xf1\xe3\xd5\xff\xf1\xe3\xd4\xff\xcb\x9bv\
\xff\xc4\x8dc\xff\xe3\xb1\x8c\xff\xe1\x9en\xff\xf1\xd2\xb7\
\xff\xfe\xeb\xda\xff\xff\xec\xdd\xff\xe5\xb0\x89\xff\xec\xaav\
\xff\xec\xa8t\xff\xd7\x8bX\xff\x92B\x0e\xff\x95G\x11\
\xff\xe0\xab\x86\xff\xf0\xbf\x9a\xff\xe8\xaf\x83\xff\xf2\xd6\xbd\
\xff\xff\xec\xdc\xff\xfe\xeb\xda\xff\xe3\xae\x86\xff\xe8\xa6s\
\xff\xde\x98g\xff\xbd|L\xff\xc1\x91m\xff\xf1\xe3\xd5\
\xff\xf1\xe3\xd5\xff\xf1\xe3\xd5\xff\xd5\xb6\xa5\xff\xd7\xb8\xa7\
\xff\xf3\xe6\xd8\xff\xf3\xe6\xd8\xff\xf3\xe6\xd8\xff\xf3\xe6\xd7\
\xff\xd7\xaf\x90\xff\xb9{L\xff\xc8\x86W\xff\xdd\xac\x88\
\xff\xf4\xd4\xb9\xff\xf5\xd7\xbc\xff\xe0\xa6~\xff\xda\x97h\
\xff\xbfxE\xff\x8c>\x08\xff\xc0\x95t\xff\xca\xa3\x86\
\xff\x8b>\x08\xff\xbctA\xff\xd6\x94e\xff\xea\xc2\xa5\
\xff\xf5\xd6\xbb\xff\xf4\xd4\xb9\xff\xd5\x98o\xff\xcb\x88Z\
\xff\xb0n=\xff\xc7\x9bz\xff\xf3\xe6\xd8\xff\xf3\xe6\xd8\
\xff\xf3\xe6\xd8\xff\xf3\xe6\xd8\xff\xd7\xb8\xa7\xff\xd8\xba\xaa\
\xff\xf5\xe8\xda\xff\xf5\xe8\xda\xff\xf5\xe8\xda\xff\xf5\xe8\xda\
\xff\xf5\xe8\xda\xff\xf5\xe5\xd5\xff\xd3\xa9\x89\xff\xbd\x85Y\
\xff\xb0o>\xff\xael;\xff\xa1\x5c)\xff\xa7i<\
\xff\xc2\x98x\xff\xf1\xe2\xd2\xff\xf5\xe8\xda\xff\xf5\xe8\xda\
\xff\xf4\xe6\xd7\xff\xc5\x9c}\xff\xa5f9\xff\x9aR\x1e\
\xff\xa4`-\xff\xa5a.\xff\xb1uI\xff\xca\x9f~\
\xff\xf1\xe2\xd2\xff\xf5\xe8\xda\xff\xf5\xe8\xda\xff\xf5\xe8\xda\
\xff\xf5\xe8\xda\xff\xf5\xe8\xda\xff\xd8\xba\xaa\xff\xd9\xbc\xac\
\xff\xf7\xea\xdd\xff\xf7\xea\xdd\xff\xf7\xea\xdd\xff\xf7\xea\xdd\
\xff\xf7\xea\xdd\xff\xf7\xea\xdd\xff\xf7\xea\xdd\xff\xf7\xea\xdd\
\xff\xf7\xea\xdd\xff\xf7\xea\xdd\xff\xf7\xea\xdd\xff\xf7\xea\xdd\
\xff\xf7\xea\xdd\xff\xf7\xea\xdd\xff\xf7\xea\xdd\xff\xf7\xea\xdd\
\xff\xf7\xea\xdd\xff\xf7\xea\xdd\xff\xf7\xea\xdd\xff\xf7\xea\xdd\
\xff\xf7\xea\xdd\xff\xf7\xea\xdd\xff\xf7\xea\xdd\xff\xf7\xea\xdd\
\xff\xf7\xea\xdd\xff\xf7\xea\xdd\xff\xf7\xea\xdd\xff\xf7\xea\xdd\
\xff\xf7\xea\xdd\xff\xf7\xea\xdd\xff\xd9\xbc\xac\xff\xdb\xbe\xaf\
\xff\xf8\xed\xdf\xff\xf8\xed\xdf\xff\xf8\xed\xdf\xff\xf8\xed\xdf\
\xff\xf8\xee\xe1\xff\xf9\xef\xe2\xff\xf9\xf0\xe3\xff\xf9\xf0\xe4\
\xff\xf9\xf0\xe4\xff\xf9\xf0\xe4\xff\xf9\xf0\xe4\xff\xf9\xf0\xe4\
\xff\xf9\xf0\xe4\xff\xf9\xf0\xe4\xff\xf9\xf0\xe4\xff\xf9\xf0\xe4\
\xff\xf9\xf0\xe4\xff\xf9\xf0\xe4\xff\xf9\xf0\xe4\xff\xf9\xf0\xe4\
\xff\xf9\xf0\xe4\xff\xf9\xf0\xe4\xff\xf9\xf0\xe4\xff\xf9\xef\xe3\
\xff\xf8\xef\xe2\xff\xf8\xed\xdf\xff\xf8\xed\xdf\xff\xf8\xed\xdf\
\xff\xf8\xed\xdf\xff\xf8\xed\xdf\xff\xdb\xbe\xaf\xff\xdb\xc0\xb1\
\xf0\xf6\xeb\xdd\xff\xfa\xf0\xe2\xff\xfa\xf0\xe2\xff\xfb\xf2\xe6\
\xff\xfb\xf2\xe7\xff\xfb\xf2\xe7\xff\xfb\xf2\xe7\xff\xfb\xf2\xe7\
\xff\xfb\xf2\xe7\xff\xfb\xf2\xe7\xff\xfb\xf2\xe7\xff\xfb\xf2\xe7\
\xff\xfb\xf2\xe7\xff\xfb\xf2\xe7\xff\xfb\xf2\xe7\xff\xfb\xf2\xe7\
\xff\xfb\xf2\xe7\xff\xfb\xf2\xe7\xff\xfb\xf2\xe7\xff\xfb\xf2\xe7\
\xff\xfb\xf2\xe7\xff\xfb\xf2\xe7\xff\xfb\xf2\xe7\xff\xfb\xf2\xe7\
\xff\xfb\xf2\xe7\xff\xfb\xf2\xe7\xff\xfa\xf0\xe4\xff\xfa\xef\xe1\
\xff\xfb\xf1\xe6\xff\xf6\xeb\xde\xff\xdb\xc0\xb1\xf0\xdd\xc1\xb3\
\xc3\xeb\xdb\xce\xff\xfe\xfb\xf6\xff\xfd\xf4\xea\xff\xfd\xf4\xe9\
\xff\xfd\xf4\xe9\xff\xfd\xf4\xe9\xff\xfd\xf4\xe9\xff\xfd\xf4\xe9\
\xff\xfd\xf4\xe9\xff\xfd\xf4\xe9\xff\xfd\xf4\xe9\xff\xfd\xf4\xe9\
\xff\xfd\xf4\xe9\xff\xfd\xf4\xe9\xff\xfd\xf4\xe9\xff\xfd\xf4\xe9\
\xff\xfd\xf4\xe9\xff\xfd\xf4\xe9\xff\xfd\xf4\xe9\xff\xfd\xf4\xe9\
\xff\xfd\xf4\xe9\xff\xfd\xf4\xe9\xff\xfd\xf4\xe9\xff\xfd\xf4\xe9\
\xff\xfd\xf4\xe9\xff\xfd\xf4\xe9\xff\xfd\xf4\xe9\xff\xfc\xf4\xea\
\xff\xfe\xfb\xf8\xff\xeb\xda\xcd\xff\xdc\xc1\xb3\xc0\xde\xc2\xb6\
?\xdd\xc3\xb5\xfc\xf8\xef\xe5\xff\xfe\xf7\xee\xff\xfe\xf6\xec\
\xff\xfe\xf6\xec\xff\xfe\xf6\xec\xff\xfe\xf6\xec\xff\xfe\xf6\xec\
\xff\xfe\xf6\xec\xff\xfe\xf6\xec\xff\xfe\xf6\xec\xff\xfe\xf6\xec\
\xff\xfe\xf6\xec\xff\xfe\xf6\xec\xff\xfe\xf6\xec\xff\xfe\xf6\xec\
\xff\xfe\xf6\xec\xff\xfe\xf6\xec\xff\xfe\xf6\xec\xff\xfe\xf6\xec\
\xff\xfe\xf6\xec\xff\xfe\xf6\xec\xff\xfe\xf6\xec\xff\xfe\xf6\xec\
\xff\xfe\xf6\xec\xff\xfe\xf6\xec\xff\xfe\xf6\xec\xff\xfe\xf7\xef\
\xff\xf7\xed\xe3\xff\xdd\xc3\xb5\xfc\xdd\xc3\xb6<\x00\x00\x00\
\x00\xdf\xc5\xb6x\xe2\xcc\xbd\xff\xf6\xf0\xeb\xff\xfd\xfa\xf8\
\xff\xff\xfc\xf9\xff\xff\xfc\xf9\xff\xff\xfc\xf9\xff\xff\xfc\xf9\
\xff\xff\xfc\xf9\xff\xff\xfc\xf9\xff\xff\xfc\xf9\xff\xff\xfc\xf9\
\xff\xff\xfc\xf9\xff\xff\xfc\xf9\xff\xff\xfc\xf9\xff\xff\xfc\xf9\
\xff\xff\xfc\xf9\xff\xff\xfc\xf9\xff\xff\xfc\xf9\xff\xff\xfc\xf9\
\xff\xff\xfc\xf9\xff\xff\xfc\xf9\xff\xff\xfc\xf9\xff\xff\xfc\xf9\
\xff\xff\xfc\xf9\xff\xff\xfc\xf9\xff\xfd\xfa\xf8\xff\xf4\xeb\xe4\
\xff\xe1\xcb\xbc\xff\xdf\xc5\xb6x\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\xde\xc6\xb6?\xdf\xc6\xb8\xb1\xe2\xca\xbc\
\xe6\xe5\xd2\xc7\xff\xe5\xd2\xc7\xff\xe5\xd2\xc7\xff\xe5\xd2\xc7\
\xff\xe5\xd2\xc7\xff\xe5\xd2\xc7\xff\xe5\xd2\xc7\xff\xe5\xd2\xc7\
\xff\xe5\xd2\xc7\xff\xe5\xd2\xc7\xff\xe5\xd2\xc7\xff\xe5\xd2\xc7\
\xff\xe5\xd2\xc7\xff\xe5\xd2\xc7\xff\xe5\xd2\xc7\xff\xe5\xd2\xc7\
\xff\xe5\xd2\xc7\xff\xe5\xd2\xc7\xff\xe5\xd2\xc7\xff\xe5\xd2\xc7\
\xff\xe5\xd2\xc7\xff\xe5\xd1\xc4\xff\xde\xc7\xb8\xe4\xdf\xc6\xb8\
\xb1\xde\xc6\xb6?\x00\x00\x00\x00\x00\x00\x00\x00(\x00\x00\
\x00\x10\x00\x00\x00 \x00\x00\x00\x01\x00 \x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\xb5I\x11-\xb4H\x11\x93\xb4H\x11\
\xd2\xb5I\x11\xff\xb5I\x11\xff\xb5H\x11\xcc\xb5I\x11\
\x87\xb1E\x0f!\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaaU\x00\
\x03\xb4H\x10{\xb5H\x11\xf9\xb6H\x07\xff\xbfT\x03\
\xff\xc5]\x00\xff\xc5]\x00\xff\xbfS\x03\xff\xb6H\x08\
\xff\xb4I\x10\xf3\xb4H\x11f\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaaU\x1c\x09\xb4I\x11\
\xc3\xcc\x80L\xff\xf2\xe0\xd0\xff\xbff\x1f\xff\xb7G\x00\
\xff\xc2X\x00\xff\xc2X\x00\xff\xb7G\x00\xff\xbff\x1f\
\xff\xec\xd2\xbe\xff\xca}I\xff\xb4I\x10\xab\xaaU\x00\
\x03\x00\x00\x00\x00\x00\x00\x00\x00\xb5H\x11\xa2\xd1\x8c[\
\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xc8y)\
\xff\xbaK\x00\xff\xbaK\x00\xff\xc8y)\xff\xff\xff\xff\
\xff\xff\xff\xff\xff\xff\xff\xff\xff\xcf\x88W\xff\xb5I\x11\
\x81\x00\x00\x00\x00\xb2G\x119\xb9O\x0d\xff\xd7\x93-\
\xff\xfa\xf5\xe8\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfa\xf3\xe8\
\xff\xb9R\x12\xff\xb9R\x12\xff\xfa\xf3\xe8\xff\xff\xff\xff\
\xff\xff\xff\xff\xff\xfa\xf5\xe8\xff\xd4\x8d+\xff\xb5H\x11\
\xf9\xb1E\x0f!\xb4H\x11\xa5\xc2[\x06\xff\xcae\x00\
\xff\xd1\x81\x1d\xff\xfa\xf4\xe9\xff\xff\xff\xff\xff\xff\xff\xff\
\xff\xdf\xb5y\xff\xdf\xb5y\xff\xff\xff\xff\xff\xff\xff\xff\
\xff\xfa\xf4\xe9\xff\xd1\x81\x1d\xff\xcae\x00\xff\xbfX\x09\
\xff\xb4H\x10~\xb5H\x10\xe4\xc9f\x02\xff\xcbi\x00\
\xff\xcbg\x00\xff\xd0\x82\x1f\xff\xff\xff\xff\xff\xff\xff\xff\
\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\
\xff\xd0\x82\x1f\xff\xcbg\x00\xff\xcbi\x00\xff\xc7d\x04\
\xff\xb5I\x10\xc9\xb6J\x11\xff\xcdm\x00\xff\xcck\x00\
\xff\xcck\x00\xff\xcbg\x00\xff\xe0\xb1\x5c\xff\xff\xff\xff\
\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xe0\xb1\x5c\
\xff\xcbg\x00\xff\xcck\x00\xff\xcck\x00\xff\xccl\x01\
\xff\xb4I\x11\xea\xb5H\x11\xfc\xd1t\x00\xff\xcen\x00\
\xff\xcen\x00\xff\xcen\x00\xff\xcez\x18\xff\xff\xff\xff\
\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xcez\x18\
\xff\xcen\x00\xff\xcen\x00\xff\xcen\x00\xff\xd0s\x01\
\xff\xb4I\x11\xea\xb5H\x10\xe4\xd3z\x02\xff\xd0r\x00\
\xff\xcfp\x00\xff\xcfp\x00\xff\xcaf\x00\xff\xee\xd9\xb2\
\xff\xff\xff\xff\xff\xff\xff\xff\xff\xee\xd9\xb2\xff\xcaf\x00\
\xff\xcfp\x00\xff\xcfp\x00\xff\xd0r\x00\xff\xd0v\x04\
\xff\xb5I\x10\xc6\xb5H\x11\xa2\xcex\x06\xff\xd3}\x00\
\xff\xcft\x00\xff\xcfs\x00\xff\xcbj\x00\xff\xdf\xb5n\
\xff\xff\xff\xff\xff\xff\xff\xff\xff\xdf\xb5n\xff\xcbj\x00\
\xff\xcfs\x00\xff\xd0t\x00\xff\xd4\x7f\x00\xff\xc9o\x09\
\xff\xb4H\x10{\xb2G\x119\xc0^\x0d\xff\xdd\x91\x00\
\xff\xd5\x7f\x00\xff\xd2v\x00\xff\xcdl\x00\xff\xe0\xb7t\
\xff\xff\xff\xff\xff\xff\xff\xff\xff\xe0\xb7t\xff\xcdl\x00\
\xff\xd2w\x00\xff\xd6\x81\x00\xff\xda\x8e\x02\xff\xb5H\x11\
\xf9\xb2L\x11\x1e\x00\x00\x00\x00\xb5I\x11\x9f\xcbw\x09\
\xff\xdf\x9c\x00\xff\xd8\x8a\x00\xff\xd1v\x00\xff\xe2\xbb|\
\xff\xff\xff\xff\xff\xff\xff\xff\xff\xe2\xbb|\xff\xd1w\x00\
\xff\xd9\x8c\x00\xff\xe0\x9e\x00\xff\xcbu\x0a\xff\xb4H\x10\
~\x00\x00\x00\x00\x00\x00\x00\x00\xaaU\x1c\x09\xb4I\x11\
\xc3\xccx\x0a\xff\xdf\xa0\x02\xff\xdc\x91\x00\xff\xe7\xca\x98\
\xff\xff\xff\xff\xff\xff\xff\xff\xff\xe7\xca\x98\xff\xdd\x92\x00\
\xff\xdc\x99\x03\xff\xcbv\x0a\xff\xb4I\x10\xab\xaaU\x00\
\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaaU\x00\
\x03\xb4H\x10{\xb5H\x11\xf9\xd3\x82\x07\xff\xe4\xbf\x9a\
\xff\xff\xff\xff\xff\xfd\xfb\xf9\xff\xe4\xbf\x9a\xff\xd0~\x08\
\xff\xb4I\x10\xf3\xb4H\x11f\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\xb5I\x11-\xb4H\x11\x93\xb5H\x11\
\xcc\xb5I\x11\xff\xb5H\x11\xf9\xb5H\x11\xcc\xb5I\x11\
\x87\xb1E\x0f!\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
"
qt_resource_name = b"\
\x00\x03\
\x00\x00p7\
\x00i\
\x00m\x00g\
\x00\x0e\
\x05\x0a\x18\x9f\
\x00f\
\x00a\x00v\x00i\x00c\x00o\x00n\x00-\x00r\x00r\x00.\x00i\x00c\x00o\
"
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
def qInitResources():
    QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)

def qCleanupResources():
    QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
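
# Importing this module registers the embedded data with Qt, making the icon
# available under the resource path ":/img/favicon-rr.ico" (per
# qt_resource_name above). A minimal usage sketch, assuming the generated
# module is importable as `favicon_rc` (the module name is hypothetical):
#
#     from PySide2 import QtGui
#     import favicon_rc  # hypothetical name for this generated module
#     icon = QtGui.QIcon(":/img/favicon-rr.ico")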
074426db5a210444413b513ac3fd5c9dcaec57c2 | 59,342 | py | Python
gr37/radio_jove/radio_jove_dual_rtlsdr_sigmf_3.py | zleffke/flowgraph_sandbox | 6bcad45fd4585e917678b843be323278ebf06323 | ["MIT"] | null | null | null
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: Radio Jove RTL-SDR Receiver
# Author: Zach Leffke, KJ4QLP
# Description: Receive Jupiter Emissions with RTL-SDR on 20.1MHz
# GNU Radio version: 3.7.13.4
##################################################
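# Structure note (inferred from the variables and plot labels below): two
# RTL-SDR channels, one per linear antenna ("NS" and "EW"), are decimated to
# a common audio rate, and a 90-degree phase shift (phase_shift_complex)
# lets the flowgraph synthesize the LHCP/RHCP circular polarizations shown
# in the "Filter + Sense" and "Power" tabs.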
if __name__ == '__main__':
    import ctypes
    import sys
    if sys.platform.startswith('linux'):
        try:
            # Qt and GNU Radio both spawn threads that touch X11;
            # XInitThreads() must run before any other Xlib call.
            x11 = ctypes.cdll.LoadLibrary('libX11.so')
            x11.XInitThreads()
        except:
            print "Warning: failed to XInitThreads()"
from PyQt4 import Qt
from datetime import datetime as dt; import string; import math
from gnuradio import analog
from gnuradio import audio
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio import filter
from gnuradio import gr
from gnuradio import qtgui
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from gnuradio.qtgui import Range, RangeWidget
from optparse import OptionParser
import osmosdr
import sip
import sys
import time
from gnuradio import qtgui
class radio_jove_dual_rtlsdr_sigmf_3(gr.top_block, Qt.QWidget):

    def __init__(self, path="/captures/radio_jove", signal_type='RADIO-JOVE'):
        gr.top_block.__init__(self, "Radio Jove RTL-SDR Receiver")
        Qt.QWidget.__init__(self)
        self.setWindowTitle("Radio Jove RTL-SDR Receiver")
        qtgui.util.check_set_qss()
        try:
            self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
        except:
            pass
        self.top_scroll_layout = Qt.QVBoxLayout()
        self.setLayout(self.top_scroll_layout)
        self.top_scroll = Qt.QScrollArea()
        self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
        self.top_scroll_layout.addWidget(self.top_scroll)
        self.top_scroll.setWidgetResizable(True)
        self.top_widget = Qt.QWidget()
        self.top_scroll.setWidget(self.top_widget)
        self.top_layout = Qt.QVBoxLayout(self.top_widget)
        self.top_grid_layout = Qt.QGridLayout()
        self.top_layout.addLayout(self.top_grid_layout)
        self.settings = Qt.QSettings("GNU Radio", "radio_jove_dual_rtlsdr_sigmf_3")
        self.restoreGeometry(self.settings.value("geometry").toByteArray())
        ##################################################
        # Parameters
        ##################################################
        self.path = path
        self.signal_type = signal_type

        ##################################################
        # Variables
        ##################################################
        self.ts_str = ts_str = dt.strftime(dt.utcnow(), "%Y-%m-%dT%H:%M:%SZ")
        self.samp_rate = samp_rate = 2048000
        self.pol = pol = "linear"
        self.interp_2 = interp_2 = 48
        self.interp_1 = interp_1 = 1000
        self.decim_2 = decim_2 = 500
        self.decim_1 = decim_1 = 2048*2
        self.antenna_1 = antenna_1 = "EW"
        self.antenna_0 = antenna_0 = "NS"
        self.phase_shift_rad = phase_shift_rad = math.pi/2
        self.fn_wav = fn_wav = "{:s}_{:s}_{:s}.wav".format(signal_type.upper(), pol.upper(), ts_str)
        self.fn_1 = fn_1 = "{:s}_{:s}_{:s}".format(signal_type.upper(), antenna_1.upper(), ts_str)
        self.fn_0 = fn_0 = "{:s}_{:s}_{:s}".format(signal_type.upper(), antenna_0.upper(), ts_str)
        self.audio_rate_in = audio_rate_in = samp_rate/(decim_1*decim_2) * (interp_1*interp_2)
        self.vol_right = vol_right = 100
        self.vol_left = vol_left = 100
        self.rx_freq = rx_freq = 20.125e6
        self.phase_shift_complex = phase_shift_complex = complex(math.cos(phase_shift_rad), math.sin(phase_shift_rad))
        self.offset = offset = samp_rate/4
        self.lpf_cut = lpf_cut = 20e3
        self.iir_alpha = iir_alpha = 0.5
        self.fp_wav = fp_wav = "{:s}/{:s}".format(path, fn_wav)
        self.fp_1 = fp_1 = "{:s}/{:s}".format(path, fn_1)
        self.fp_0 = fp_0 = "{:s}/{:s}".format(path, fn_0)
        self.audio_rate_out = audio_rate_out = audio_rate_in/3
        self.audio_lpf_cut = audio_lpf_cut = 5e3
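
        # Rate bookkeeping, worked out from the values above (integer math):
        #   decim_1 = 2048*2 = 4096, decim_2 = 500; interp_1 = 1000, interp_2 = 48
        #   audio_rate_in  = 2048000/(4096*500) * (1000*48) = 48000 S/s
        #   audio_rate_out = 48000/3 = 16000 S/s
        # phase_shift_rad = pi/2, so phase_shift_complex = cos(pi/2) + j*sin(pi/2) = 1j,
        # a 90-degree rotation for combining the two antenna channels.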
        ##################################################
        # Blocks
        ##################################################
        self.main_tab = Qt.QTabWidget()
        self.main_tab_widget_0 = Qt.QWidget()
        self.main_tab_layout_0 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.main_tab_widget_0)
        self.main_tab_grid_layout_0 = Qt.QGridLayout()
        self.main_tab_layout_0.addLayout(self.main_tab_grid_layout_0)
        self.main_tab.addTab(self.main_tab_widget_0, 'Channel')
        self.main_tab_widget_1 = Qt.QWidget()
        self.main_tab_layout_1 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.main_tab_widget_1)
        self.main_tab_grid_layout_1 = Qt.QGridLayout()
        self.main_tab_layout_1.addLayout(self.main_tab_grid_layout_1)
        self.main_tab.addTab(self.main_tab_widget_1, 'Filter + Sense')
        self.main_tab_widget_2 = Qt.QWidget()
        self.main_tab_layout_2 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.main_tab_widget_2)
        self.main_tab_grid_layout_2 = Qt.QGridLayout()
        self.main_tab_layout_2.addLayout(self.main_tab_grid_layout_2)
        self.main_tab.addTab(self.main_tab_widget_2, 'Audio')
        self.main_tab_widget_3 = Qt.QWidget()
        self.main_tab_layout_3 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom, self.main_tab_widget_3)
        self.main_tab_grid_layout_3 = Qt.QGridLayout()
        self.main_tab_layout_3.addLayout(self.main_tab_grid_layout_3)
        self.main_tab.addTab(self.main_tab_widget_3, 'Power')
        self.top_grid_layout.addWidget(self.main_tab, 1, 0, 1, 1)
        for r in range(1, 2):
            self.top_grid_layout.setRowStretch(r, 1)
        for c in range(0, 1):
            self.top_grid_layout.setColumnStretch(c, 1)
        self._vol_right_range = Range(0, 200, .1, 100, 200)
        self._vol_right_win = RangeWidget(self._vol_right_range, self.set_vol_right, "vol_right", "counter_slider", float)
        self.main_tab_grid_layout_2.addWidget(self._vol_right_win, 8, 0, 1, 4)
        for r in range(8, 9):
            self.main_tab_grid_layout_2.setRowStretch(r, 1)
        for c in range(0, 4):
            self.main_tab_grid_layout_2.setColumnStretch(c, 1)
        self._vol_left_range = Range(0, 200, .1, 100, 200)
        self._vol_left_win = RangeWidget(self._vol_left_range, self.set_vol_left, "vol_left", "counter_slider", float)
        self.main_tab_grid_layout_2.addWidget(self._vol_left_win, 7, 0, 1, 4)
        for r in range(7, 8):
            self.main_tab_grid_layout_2.setRowStretch(r, 1)
        for c in range(0, 4):
            self.main_tab_grid_layout_2.setColumnStretch(c, 1)
        self._rx_freq_tool_bar = Qt.QToolBar(self)
        self._rx_freq_tool_bar.addWidget(Qt.QLabel('Freq [Hz]'+": "))
        self._rx_freq_line_edit = Qt.QLineEdit(str(self.rx_freq))
        self._rx_freq_tool_bar.addWidget(self._rx_freq_line_edit)
        self._rx_freq_line_edit.returnPressed.connect(
            lambda: self.set_rx_freq(eng_notation.str_to_num(str(self._rx_freq_line_edit.text().toAscii()))))
        self.main_tab_grid_layout_0.addWidget(self._rx_freq_tool_bar, 4, 0, 1, 2)
        for r in range(4, 5):
            self.main_tab_grid_layout_0.setRowStretch(r, 1)
        for c in range(0, 2):
            self.main_tab_grid_layout_0.setColumnStretch(c, 1)
        self._iir_alpha_range = Range(0, 1, .001, 0.5, 200)
        self._iir_alpha_win = RangeWidget(self._iir_alpha_range, self.set_iir_alpha, "iir_alpha", "counter_slider", float)
        self.main_tab_grid_layout_3.addWidget(self._iir_alpha_win, 8, 4, 1, 4)
        for r in range(8, 9):
            self.main_tab_grid_layout_3.setRowStretch(r, 1)
        for c in range(4, 8):
            self.main_tab_grid_layout_3.setColumnStretch(c, 1)
        self._audio_lpf_cut_tool_bar = Qt.QToolBar(self)
        self._audio_lpf_cut_tool_bar.addWidget(Qt.QLabel("audio_lpf_cut"+": "))
        self._audio_lpf_cut_line_edit = Qt.QLineEdit(str(self.audio_lpf_cut))
        self._audio_lpf_cut_tool_bar.addWidget(self._audio_lpf_cut_line_edit)
        self._audio_lpf_cut_line_edit.returnPressed.connect(
            lambda: self.set_audio_lpf_cut(eng_notation.str_to_num(str(self._audio_lpf_cut_line_edit.text().toAscii()))))
        self.main_tab_grid_layout_2.addWidget(self._audio_lpf_cut_tool_bar, 8, 5, 1, 1)
        for r in range(8, 9):
            self.main_tab_grid_layout_2.setRowStretch(r, 1)
        for c in range(5, 6):
            self.main_tab_grid_layout_2.setColumnStretch(c, 1)
        self.single_pole_iir_filter_xx_0_1 = filter.single_pole_iir_filter_ff(iir_alpha, 1)
        self.single_pole_iir_filter_xx_0_0_0 = filter.single_pole_iir_filter_ff(iir_alpha, 1)
        self.single_pole_iir_filter_xx_0_0 = filter.single_pole_iir_filter_ff(iir_alpha, 1)
        self.single_pole_iir_filter_xx_0 = filter.single_pole_iir_filter_ff(iir_alpha, 1)
        self.rational_resampler_xxx_2_0 = filter.rational_resampler_ccc(
                interpolation=interp_2,
                decimation=decim_2,
                taps=None,
                fractional_bw=None,
        )
        self.rational_resampler_xxx_2 = filter.rational_resampler_ccc(
                interpolation=interp_1,
                decimation=decim_1,
                taps=None,
                fractional_bw=None,
        )
        self.rational_resampler_xxx_1_0 = filter.rational_resampler_ccc(
                interpolation=interp_2,
                decimation=decim_2,
                taps=None,
                fractional_bw=None,
        )
        self.rational_resampler_xxx_1 = filter.rational_resampler_ccc(
                interpolation=interp_1,
                decimation=decim_1,
                taps=None,
                fractional_bw=None,
        )
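
        # The 2.048 MS/s input is resampled in two cascaded stages per channel
        # (1000/4096, then 48/500) rather than in one large ratio, which keeps
        # each stage's anti-aliasing filter short:
        #   2048000 * (1000/4096) * (48/500) = 48000 S/s into the audio chain.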
        self.qtgui_waterfall_sink_x_1_0 = qtgui.waterfall_sink_f(
            1024, #size
            firdes.WIN_BLACKMAN_hARRIS, #wintype
            0, #fc
            audio_rate_out, #bw
            "", #name
            1 #number of inputs
        )
        self.qtgui_waterfall_sink_x_1_0.set_update_time(0.010)
        self.qtgui_waterfall_sink_x_1_0.enable_grid(False)
        self.qtgui_waterfall_sink_x_1_0.enable_axis_labels(True)
        if not True:
            self.qtgui_waterfall_sink_x_1_0.disable_legend()
        if "float" == "float" or "float" == "msg_float":
            self.qtgui_waterfall_sink_x_1_0.set_plot_pos_half(not False)
        labels = ['NS-Left', '', '', '', '',
                  '', '', '', '', '']
        colors = [0, 0, 0, 0, 0,
                  0, 0, 0, 0, 0]
        alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
                  1.0, 1.0, 1.0, 1.0, 1.0]
        for i in xrange(1):
            if len(labels[i]) == 0:
                self.qtgui_waterfall_sink_x_1_0.set_line_label(i, "Data {0}".format(i))
            else:
                self.qtgui_waterfall_sink_x_1_0.set_line_label(i, labels[i])
            self.qtgui_waterfall_sink_x_1_0.set_color_map(i, colors[i])
            self.qtgui_waterfall_sink_x_1_0.set_line_alpha(i, alphas[i])
        self.qtgui_waterfall_sink_x_1_0.set_intensity_range(-140, 10)
        self._qtgui_waterfall_sink_x_1_0_win = sip.wrapinstance(self.qtgui_waterfall_sink_x_1_0.pyqwidget(), Qt.QWidget)
        self.main_tab_grid_layout_2.addWidget(self._qtgui_waterfall_sink_x_1_0_win, 4, 0, 2, 4)
        for r in range(4, 6):
            self.main_tab_grid_layout_2.setRowStretch(r, 1)
        for c in range(0, 4):
            self.main_tab_grid_layout_2.setColumnStretch(c, 1)
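
        # The qtgui sink blocks below are emitted from a single GRC template:
        # branches like "if not True:" and '"complex" == "float"' are
        # constant-folded template leftovers, and the ten-entry
        # label/color/alpha lists are sized for the sinks' maximum input
        # count even when only one or two inputs are used.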
        self.qtgui_waterfall_sink_x_0_1_0 = qtgui.waterfall_sink_c(
            2048, #size
            firdes.WIN_BLACKMAN_hARRIS, #wintype
            0, #fc
            audio_rate_out, #bw
            "RHCP", #name
            1 #number of inputs
        )
        self.qtgui_waterfall_sink_x_0_1_0.set_update_time(0.010)
        self.qtgui_waterfall_sink_x_0_1_0.enable_grid(False)
        self.qtgui_waterfall_sink_x_0_1_0.enable_axis_labels(True)
        if not True:
            self.qtgui_waterfall_sink_x_0_1_0.disable_legend()
        if "complex" == "float" or "complex" == "msg_float":
            self.qtgui_waterfall_sink_x_0_1_0.set_plot_pos_half(not True)
        labels = ['', '', '', '', '',
                  '', '', '', '', '']
        colors = [0, 0, 0, 0, 0,
                  0, 0, 0, 0, 0]
        alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
                  1.0, 1.0, 1.0, 1.0, 1.0]
        for i in xrange(1):
            if len(labels[i]) == 0:
                self.qtgui_waterfall_sink_x_0_1_0.set_line_label(i, "Data {0}".format(i))
            else:
                self.qtgui_waterfall_sink_x_0_1_0.set_line_label(i, labels[i])
            self.qtgui_waterfall_sink_x_0_1_0.set_color_map(i, colors[i])
            self.qtgui_waterfall_sink_x_0_1_0.set_line_alpha(i, alphas[i])
        self.qtgui_waterfall_sink_x_0_1_0.set_intensity_range(-140, 10)
        self._qtgui_waterfall_sink_x_0_1_0_win = sip.wrapinstance(self.qtgui_waterfall_sink_x_0_1_0.pyqwidget(), Qt.QWidget)
        self.main_tab_grid_layout_1.addWidget(self._qtgui_waterfall_sink_x_0_1_0_win, 4, 4, 2, 4)
        for r in range(4, 6):
            self.main_tab_grid_layout_1.setRowStretch(r, 1)
        for c in range(4, 8):
            self.main_tab_grid_layout_1.setColumnStretch(c, 1)

        self.qtgui_waterfall_sink_x_0_1 = qtgui.waterfall_sink_c(
            2048, #size
            firdes.WIN_BLACKMAN_hARRIS, #wintype
            0, #fc
            samp_rate/(decim_1*decim_2)*(interp_1*interp_2), #bw
            "N/S", #name
            1 #number of inputs
        )
        self.qtgui_waterfall_sink_x_0_1.set_update_time(0.010)
        self.qtgui_waterfall_sink_x_0_1.enable_grid(False)
        self.qtgui_waterfall_sink_x_0_1.enable_axis_labels(True)
        if not True:
            self.qtgui_waterfall_sink_x_0_1.disable_legend()
        if "complex" == "float" or "complex" == "msg_float":
            self.qtgui_waterfall_sink_x_0_1.set_plot_pos_half(not True)
        labels = ['', '', '', '', '',
                  '', '', '', '', '']
        colors = [0, 0, 0, 0, 0,
                  0, 0, 0, 0, 0]
        alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
                  1.0, 1.0, 1.0, 1.0, 1.0]
        for i in xrange(1):
            if len(labels[i]) == 0:
                self.qtgui_waterfall_sink_x_0_1.set_line_label(i, "Data {0}".format(i))
            else:
                self.qtgui_waterfall_sink_x_0_1.set_line_label(i, labels[i])
            self.qtgui_waterfall_sink_x_0_1.set_color_map(i, colors[i])
            self.qtgui_waterfall_sink_x_0_1.set_line_alpha(i, alphas[i])
        self.qtgui_waterfall_sink_x_0_1.set_intensity_range(-140, 10)
        self._qtgui_waterfall_sink_x_0_1_win = sip.wrapinstance(self.qtgui_waterfall_sink_x_0_1.pyqwidget(), Qt.QWidget)
        self.main_tab_grid_layout_1.addWidget(self._qtgui_waterfall_sink_x_0_1_win, 4, 0, 2, 4)
        for r in range(4, 6):
            self.main_tab_grid_layout_1.setRowStretch(r, 1)
        for c in range(0, 4):
            self.main_tab_grid_layout_1.setColumnStretch(c, 1)

        self.qtgui_waterfall_sink_x_0_0_0_0 = qtgui.waterfall_sink_c(
            2048, #size
            firdes.WIN_BLACKMAN_hARRIS, #wintype
            0, #fc
            audio_rate_out, #bw
            "LHCP", #name
            1 #number of inputs
        )
        self.qtgui_waterfall_sink_x_0_0_0_0.set_update_time(0.010)
        self.qtgui_waterfall_sink_x_0_0_0_0.enable_grid(False)
        self.qtgui_waterfall_sink_x_0_0_0_0.enable_axis_labels(True)
        if not True:
            self.qtgui_waterfall_sink_x_0_0_0_0.disable_legend()
        if "complex" == "float" or "complex" == "msg_float":
            self.qtgui_waterfall_sink_x_0_0_0_0.set_plot_pos_half(not True)
        labels = ['', '', '', '', '',
                  '', '', '', '', '']
        colors = [0, 0, 0, 0, 0,
                  0, 0, 0, 0, 0]
        alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
                  1.0, 1.0, 1.0, 1.0, 1.0]
        for i in xrange(1):
            if len(labels[i]) == 0:
                self.qtgui_waterfall_sink_x_0_0_0_0.set_line_label(i, "Data {0}".format(i))
            else:
                self.qtgui_waterfall_sink_x_0_0_0_0.set_line_label(i, labels[i])
            self.qtgui_waterfall_sink_x_0_0_0_0.set_color_map(i, colors[i])
            self.qtgui_waterfall_sink_x_0_0_0_0.set_line_alpha(i, alphas[i])
        self.qtgui_waterfall_sink_x_0_0_0_0.set_intensity_range(-140, 10)
        self._qtgui_waterfall_sink_x_0_0_0_0_win = sip.wrapinstance(self.qtgui_waterfall_sink_x_0_0_0_0.pyqwidget(), Qt.QWidget)
        self.main_tab_grid_layout_1.addWidget(self._qtgui_waterfall_sink_x_0_0_0_0_win, 6, 4, 2, 4)
        for r in range(6, 8):
            self.main_tab_grid_layout_1.setRowStretch(r, 1)
        for c in range(4, 8):
            self.main_tab_grid_layout_1.setColumnStretch(c, 1)

        self.qtgui_waterfall_sink_x_0_0_0 = qtgui.waterfall_sink_c(
            2048, #size
            firdes.WIN_BLACKMAN_hARRIS, #wintype
            0, #fc
            samp_rate/(decim_1*decim_2)*(interp_1*interp_2), #bw
            "E/W", #name
            1 #number of inputs
        )
        self.qtgui_waterfall_sink_x_0_0_0.set_update_time(0.010)
        self.qtgui_waterfall_sink_x_0_0_0.enable_grid(False)
        self.qtgui_waterfall_sink_x_0_0_0.enable_axis_labels(True)
        if not True:
            self.qtgui_waterfall_sink_x_0_0_0.disable_legend()
        if "complex" == "float" or "complex" == "msg_float":
            self.qtgui_waterfall_sink_x_0_0_0.set_plot_pos_half(not True)
        labels = ['', '', '', '', '',
                  '', '', '', '', '']
        colors = [0, 0, 0, 0, 0,
                  0, 0, 0, 0, 0]
        alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
                  1.0, 1.0, 1.0, 1.0, 1.0]
        for i in xrange(1):
            if len(labels[i]) == 0:
                self.qtgui_waterfall_sink_x_0_0_0.set_line_label(i, "Data {0}".format(i))
            else:
                self.qtgui_waterfall_sink_x_0_0_0.set_line_label(i, labels[i])
            self.qtgui_waterfall_sink_x_0_0_0.set_color_map(i, colors[i])
            self.qtgui_waterfall_sink_x_0_0_0.set_line_alpha(i, alphas[i])
        self.qtgui_waterfall_sink_x_0_0_0.set_intensity_range(-140, 10)
        self._qtgui_waterfall_sink_x_0_0_0_win = sip.wrapinstance(self.qtgui_waterfall_sink_x_0_0_0.pyqwidget(), Qt.QWidget)
        self.main_tab_grid_layout_1.addWidget(self._qtgui_waterfall_sink_x_0_0_0_win, 6, 0, 2, 4)
        for r in range(6, 8):
            self.main_tab_grid_layout_1.setRowStretch(r, 1)
        for c in range(0, 4):
            self.main_tab_grid_layout_1.setColumnStretch(c, 1)

        self.qtgui_waterfall_sink_x_0_0 = qtgui.waterfall_sink_c(
            2048, #size
            firdes.WIN_BLACKMAN_hARRIS, #wintype
            rx_freq, #fc
            samp_rate/decim_1*interp_1, #bw
            "E/W", #name
            1 #number of inputs
        )
        self.qtgui_waterfall_sink_x_0_0.set_update_time(0.010)
        self.qtgui_waterfall_sink_x_0_0.enable_grid(False)
        self.qtgui_waterfall_sink_x_0_0.enable_axis_labels(True)
        if not True:
            self.qtgui_waterfall_sink_x_0_0.disable_legend()
        if "complex" == "float" or "complex" == "msg_float":
            self.qtgui_waterfall_sink_x_0_0.set_plot_pos_half(not True)
        labels = ['', '', '', '', '',
                  '', '', '', '', '']
        colors = [0, 0, 0, 0, 0,
                  0, 0, 0, 0, 0]
        alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
                  1.0, 1.0, 1.0, 1.0, 1.0]
        for i in xrange(1):
            if len(labels[i]) == 0:
                self.qtgui_waterfall_sink_x_0_0.set_line_label(i, "Data {0}".format(i))
            else:
                self.qtgui_waterfall_sink_x_0_0.set_line_label(i, labels[i])
            self.qtgui_waterfall_sink_x_0_0.set_color_map(i, colors[i])
            self.qtgui_waterfall_sink_x_0_0.set_line_alpha(i, alphas[i])
        self.qtgui_waterfall_sink_x_0_0.set_intensity_range(-140, 10)
        self._qtgui_waterfall_sink_x_0_0_win = sip.wrapinstance(self.qtgui_waterfall_sink_x_0_0.pyqwidget(), Qt.QWidget)
        self.main_tab_grid_layout_0.addWidget(self._qtgui_waterfall_sink_x_0_0_win, 2, 4, 2, 4)
        for r in range(2, 4):
            self.main_tab_grid_layout_0.setRowStretch(r, 1)
        for c in range(4, 8):
            self.main_tab_grid_layout_0.setColumnStretch(c, 1)

        self.qtgui_waterfall_sink_x_0 = qtgui.waterfall_sink_c(
            2048, #size
            firdes.WIN_BLACKMAN_hARRIS, #wintype
            rx_freq, #fc
            samp_rate/decim_1*interp_1, #bw
            "N/S", #name
            1 #number of inputs
        )
        self.qtgui_waterfall_sink_x_0.set_update_time(0.010)
        self.qtgui_waterfall_sink_x_0.enable_grid(False)
        self.qtgui_waterfall_sink_x_0.enable_axis_labels(True)
        if not True:
            self.qtgui_waterfall_sink_x_0.disable_legend()
        if "complex" == "float" or "complex" == "msg_float":
            self.qtgui_waterfall_sink_x_0.set_plot_pos_half(not True)
        labels = ['', '', '', '', '',
                  '', '', '', '', '']
        colors = [0, 0, 0, 0, 0,
                  0, 0, 0, 0, 0]
        alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
                  1.0, 1.0, 1.0, 1.0, 1.0]
        for i in xrange(1):
            if len(labels[i]) == 0:
                self.qtgui_waterfall_sink_x_0.set_line_label(i, "Data {0}".format(i))
            else:
                self.qtgui_waterfall_sink_x_0.set_line_label(i, labels[i])
            self.qtgui_waterfall_sink_x_0.set_color_map(i, colors[i])
            self.qtgui_waterfall_sink_x_0.set_line_alpha(i, alphas[i])
        self.qtgui_waterfall_sink_x_0.set_intensity_range(-140, 10)
        self._qtgui_waterfall_sink_x_0_win = sip.wrapinstance(self.qtgui_waterfall_sink_x_0.pyqwidget(), Qt.QWidget)
        self.main_tab_grid_layout_0.addWidget(self._qtgui_waterfall_sink_x_0_win, 0, 4, 2, 4)
        for r in range(0, 2):
            self.main_tab_grid_layout_0.setRowStretch(r, 1)
        for c in range(4, 8):
            self.main_tab_grid_layout_0.setColumnStretch(c, 1)

        self.qtgui_time_sink_x_0_0 = qtgui.time_sink_f(
            1024, #size
            audio_rate_out, #samp_rate
            "", #name
            2 #number of inputs
        )
        self.qtgui_time_sink_x_0_0.set_update_time(0.10)
        self.qtgui_time_sink_x_0_0.set_y_axis(-1, 1)
        self.qtgui_time_sink_x_0_0.set_y_label('Amplitude', "")
        self.qtgui_time_sink_x_0_0.enable_tags(-1, True)
        self.qtgui_time_sink_x_0_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, 0, "")
        self.qtgui_time_sink_x_0_0.enable_autoscale(False)
        self.qtgui_time_sink_x_0_0.enable_grid(True)
        self.qtgui_time_sink_x_0_0.enable_axis_labels(True)
        self.qtgui_time_sink_x_0_0.enable_control_panel(False)
        self.qtgui_time_sink_x_0_0.enable_stem_plot(False)
        if not True:
            self.qtgui_time_sink_x_0_0.disable_legend()
        labels = ['NS - Left', 'EW - Right', '', '', '',
                  '', '', '', '', '']
        widths = [1, 1, 1, 1, 1,
                  1, 1, 1, 1, 1]
        colors = ["blue", "red", "green", "black", "cyan",
                  "magenta", "yellow", "dark red", "dark green", "blue"]
        styles = [1, 1, 1, 1, 1,
                  1, 1, 1, 1, 1]
        markers = [-1, -1, -1, -1, -1,
                   -1, -1, -1, -1, -1]
        alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
                  1.0, 1.0, 1.0, 1.0, 1.0]
        for i in xrange(2):
            if len(labels[i]) == 0:
                self.qtgui_time_sink_x_0_0.set_line_label(i, "Data {0}".format(i))
            else:
                self.qtgui_time_sink_x_0_0.set_line_label(i, labels[i])
            self.qtgui_time_sink_x_0_0.set_line_width(i, widths[i])
            self.qtgui_time_sink_x_0_0.set_line_color(i, colors[i])
            self.qtgui_time_sink_x_0_0.set_line_style(i, styles[i])
            self.qtgui_time_sink_x_0_0.set_line_marker(i, markers[i])
            self.qtgui_time_sink_x_0_0.set_line_alpha(i, alphas[i])
        self._qtgui_time_sink_x_0_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0_0.pyqwidget(), Qt.QWidget)
        self.main_tab_grid_layout_2.addWidget(self._qtgui_time_sink_x_0_0_win, 7, 4, 1, 4)
        for r in range(7, 8):
            self.main_tab_grid_layout_2.setRowStretch(r, 1)
        for c in range(4, 8):
            self.main_tab_grid_layout_2.setColumnStretch(c, 1)

        self.qtgui_time_sink_x_0 = qtgui.time_sink_f(
            1000, #size
            samp_rate, #samp_rate
            "", #name
            4 #number of inputs
        )
        self.qtgui_time_sink_x_0.set_update_time(0.1)
        self.qtgui_time_sink_x_0.set_y_axis(-110, -10)
        self.qtgui_time_sink_x_0.set_y_label('Amplitude', "")
        self.qtgui_time_sink_x_0.enable_tags(-1, True)
        self.qtgui_time_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, 0, "")
        self.qtgui_time_sink_x_0.enable_autoscale(False)
        self.qtgui_time_sink_x_0.enable_grid(True)
        self.qtgui_time_sink_x_0.enable_axis_labels(True)
        self.qtgui_time_sink_x_0.enable_control_panel(False)
        self.qtgui_time_sink_x_0.enable_stem_plot(False)
        if not True:
            self.qtgui_time_sink_x_0.disable_legend()
        labels = ['LHCP', 'RHCP', 'NS', 'EW', '',
                  '', '', '', '', '']
        widths = [1, 1, 1, 1, 1,
                  1, 1, 1, 1, 1]
        colors = ["blue", "red", "green", "black", "cyan",
                  "magenta", "yellow", "dark red", "dark green", "blue"]
        styles = [1, 1, 1, 1, 1,
                  1, 1, 1, 1, 1]
        markers = [-1, -1, -1, -1, -1,
                   -1, -1, -1, -1, -1]
        alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
                  1.0, 1.0, 1.0, 1.0, 1.0]
        for i in xrange(4):
            if len(labels[i]) == 0:
                self.qtgui_time_sink_x_0.set_line_label(i, "Data {0}".format(i))
            else:
                self.qtgui_time_sink_x_0.set_line_label(i, labels[i])
            self.qtgui_time_sink_x_0.set_line_width(i, widths[i])
            self.qtgui_time_sink_x_0.set_line_color(i, colors[i])
            self.qtgui_time_sink_x_0.set_line_style(i, styles[i])
            self.qtgui_time_sink_x_0.set_line_marker(i, markers[i])
            self.qtgui_time_sink_x_0.set_line_alpha(i, alphas[i])
        self._qtgui_time_sink_x_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0.pyqwidget(), Qt.QWidget)
        self.main_tab_grid_layout_3.addWidget(self._qtgui_time_sink_x_0_win, 0, 4, 4, 4)
        for r in range(0, 4):
            self.main_tab_grid_layout_3.setRowStretch(r, 1)
        for c in range(4, 8):
            self.main_tab_grid_layout_3.setColumnStretch(c, 1)

        self.qtgui_number_sink_0 = qtgui.number_sink(
            gr.sizeof_float,
            0,
            qtgui.NUM_GRAPH_HORIZ,
            4
        )
        self.qtgui_number_sink_0.set_update_time(0.010)
        self.qtgui_number_sink_0.set_title('RSSI')
        labels = ['LHCP', 'RHCP', 'NS', 'EW', '',
                  '', '', '', '', '']
        units = ['', '', '', '', '',
                 '', '', '', '', '']
        colors = [("blue", "red"), ("blue", "red"), ("black", "black"), ("black", "black"), ("black", "black"),
                  ("black", "black"), ("black", "black"), ("black", "black"), ("black", "black"), ("black", "black")]
        factor = [1, 1, 1, 1, 1,
                  1, 1, 1, 1, 1]
        for i in xrange(4):
            self.qtgui_number_sink_0.set_min(i, -140)
            self.qtgui_number_sink_0.set_max(i, -40)
            self.qtgui_number_sink_0.set_color(i, colors[i][0], colors[i][1])
            if len(labels[i]) == 0:
                self.qtgui_number_sink_0.set_label(i, "Data {0}".format(i))
            else:
                self.qtgui_number_sink_0.set_label(i, labels[i])
            self.qtgui_number_sink_0.set_unit(i, units[i])
            self.qtgui_number_sink_0.set_factor(i, factor[i])
        self.qtgui_number_sink_0.enable_autoscale(False)
        self._qtgui_number_sink_0_win = sip.wrapinstance(self.qtgui_number_sink_0.pyqwidget(), Qt.QWidget)
        self.main_tab_grid_layout_3.addWidget(self._qtgui_number_sink_0_win, 4, 4, 1, 4)
        for r in range(4, 5):
            self.main_tab_grid_layout_3.setRowStretch(r, 1)
        for c in range(4, 8):
            self.main_tab_grid_layout_3.setColumnStretch(c, 1)
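
        # The four RSSI readouts (LHCP/RHCP/NS/EW) display a fixed
        # -140 to -40 dB range; they are presumably fed by the four
        # single_pole_iir_filter_ff power smoothers defined above (the
        # connect() calls that wire them up come later in the generated file).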
self.qtgui_freq_sink_x_1 = qtgui.freq_sink_f(
2048, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
audio_rate_out, #bw
"Audio Spectrum", #name
2 #number of inputs
)
self.qtgui_freq_sink_x_1.set_update_time(0.010)
self.qtgui_freq_sink_x_1.set_y_axis(-140, 10)
self.qtgui_freq_sink_x_1.set_y_label('Relative Gain', 'dB')
self.qtgui_freq_sink_x_1.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_1.enable_autoscale(False)
self.qtgui_freq_sink_x_1.enable_grid(False)
self.qtgui_freq_sink_x_1.set_fft_average(1.0)
self.qtgui_freq_sink_x_1.enable_axis_labels(True)
self.qtgui_freq_sink_x_1.enable_control_panel(False)
if not True:
self.qtgui_freq_sink_x_1.disable_legend()
if "float" == "float" or "float" == "msg_float":
self.qtgui_freq_sink_x_1.set_plot_pos_half(not False)
labels = ['NS-Left', 'EW-Right', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(2):
if len(labels[i]) == 0:
self.qtgui_freq_sink_x_1.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_freq_sink_x_1.set_line_label(i, labels[i])
self.qtgui_freq_sink_x_1.set_line_width(i, widths[i])
self.qtgui_freq_sink_x_1.set_line_color(i, colors[i])
self.qtgui_freq_sink_x_1.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_1_win = sip.wrapinstance(self.qtgui_freq_sink_x_1.pyqwidget(), Qt.QWidget)
self.main_tab_grid_layout_2.addWidget(self._qtgui_freq_sink_x_1_win, 0, 0, 4, 8)
for r in range(0, 4):
self.main_tab_grid_layout_2.setRowStretch(r, 1)
for c in range(0, 8):
self.main_tab_grid_layout_2.setColumnStretch(c, 1)
self.qtgui_freq_sink_x_0_0_0 = qtgui.freq_sink_c(
2048, #size
firdes.WIN_BLACKMAN_hARRIS, #wintype
0, #fc
audio_rate_out, #bw
"", #name
2 #number of inputs
)
self.qtgui_freq_sink_x_0_0_0.set_update_time(0.010)
self.qtgui_freq_sink_x_0_0_0.set_y_axis(-140, 10)
self.qtgui_freq_sink_x_0_0_0.set_y_label('Relative Gain', 'dB')
self.qtgui_freq_sink_x_0_0_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_0_0_0.enable_autoscale(False)
self.qtgui_freq_sink_x_0_0_0.enable_grid(True)
self.qtgui_freq_sink_x_0_0_0.set_fft_average(1.0)
self.qtgui_freq_sink_x_0_0_0.enable_axis_labels(True)
self.qtgui_freq_sink_x_0_0_0.enable_control_panel(False)
if not True:
self.qtgui_freq_sink_x_0_0_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
self.qtgui_freq_sink_x_0_0_0.set_plot_pos_half(not True)
labels = ['RHCP', 'LHCP', '', '', '',
          '', '', '', '', '']
widths = [1, 1, 1, 1, 1,
          1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
          "magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
          1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(2):
    if len(labels[i]) == 0:
        self.qtgui_freq_sink_x_0_0_0.set_line_label(i, "Data {0}".format(i))
    else:
        self.qtgui_freq_sink_x_0_0_0.set_line_label(i, labels[i])
    self.qtgui_freq_sink_x_0_0_0.set_line_width(i, widths[i])
    self.qtgui_freq_sink_x_0_0_0.set_line_color(i, colors[i])
    self.qtgui_freq_sink_x_0_0_0.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_0_0_0_win = sip.wrapinstance(self.qtgui_freq_sink_x_0_0_0.pyqwidget(), Qt.QWidget)
self.main_tab_grid_layout_1.addWidget(self._qtgui_freq_sink_x_0_0_0_win, 0, 4, 4, 4)
for r in range(0, 4):
    self.main_tab_grid_layout_1.setRowStretch(r, 1)
for c in range(4, 8):
    self.main_tab_grid_layout_1.setColumnStretch(c, 1)
self.qtgui_freq_sink_x_0_0 = qtgui.freq_sink_c(
    2048, #size
    firdes.WIN_BLACKMAN_hARRIS, #wintype
    0, #fc
    samp_rate/(decim_1*decim_2) *(interp_1*interp_2), #bw
    "", #name
    2 #number of inputs
)
self.qtgui_freq_sink_x_0_0.set_update_time(0.010)
self.qtgui_freq_sink_x_0_0.set_y_axis(-140, 10)
self.qtgui_freq_sink_x_0_0.set_y_label('Relative Gain', 'dB')
self.qtgui_freq_sink_x_0_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_0_0.enable_autoscale(False)
self.qtgui_freq_sink_x_0_0.enable_grid(True)
self.qtgui_freq_sink_x_0_0.set_fft_average(1.0)
self.qtgui_freq_sink_x_0_0.enable_axis_labels(True)
self.qtgui_freq_sink_x_0_0.enable_control_panel(False)
if not True:
    self.qtgui_freq_sink_x_0_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
    self.qtgui_freq_sink_x_0_0.set_plot_pos_half(not True)
labels = ['N/S', 'E/W', '', '', '',
          '', '', '', '', '']
widths = [1, 1, 1, 1, 1,
          1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
          "magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
          1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(2):
    if len(labels[i]) == 0:
        self.qtgui_freq_sink_x_0_0.set_line_label(i, "Data {0}".format(i))
    else:
        self.qtgui_freq_sink_x_0_0.set_line_label(i, labels[i])
    self.qtgui_freq_sink_x_0_0.set_line_width(i, widths[i])
    self.qtgui_freq_sink_x_0_0.set_line_color(i, colors[i])
    self.qtgui_freq_sink_x_0_0.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_0_0_win = sip.wrapinstance(self.qtgui_freq_sink_x_0_0.pyqwidget(), Qt.QWidget)
self.main_tab_grid_layout_1.addWidget(self._qtgui_freq_sink_x_0_0_win, 0, 0, 4, 4)
for r in range(0, 4):
    self.main_tab_grid_layout_1.setRowStretch(r, 1)
for c in range(0, 4):
    self.main_tab_grid_layout_1.setColumnStretch(c, 1)
self.qtgui_freq_sink_x_0 = qtgui.freq_sink_c(
    2048, #size
    firdes.WIN_BLACKMAN_hARRIS, #wintype
    rx_freq*0, #fc
    samp_rate / decim_1 *interp_1, #bw
    "", #name
    2 #number of inputs
)
self.qtgui_freq_sink_x_0.set_update_time(0.010)
self.qtgui_freq_sink_x_0.set_y_axis(-140, 10)
self.qtgui_freq_sink_x_0.set_y_label('Relative Gain', 'dB')
self.qtgui_freq_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0, "")
self.qtgui_freq_sink_x_0.enable_autoscale(False)
self.qtgui_freq_sink_x_0.enable_grid(True)
self.qtgui_freq_sink_x_0.set_fft_average(0.05)
self.qtgui_freq_sink_x_0.enable_axis_labels(True)
self.qtgui_freq_sink_x_0.enable_control_panel(False)
if not True:
    self.qtgui_freq_sink_x_0.disable_legend()
if "complex" == "float" or "complex" == "msg_float":
    self.qtgui_freq_sink_x_0.set_plot_pos_half(not True)
labels = ['N/S', 'E/W', '', '', '',
          '', '', '', '', '']
widths = [1, 1, 1, 1, 1,
          1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
          "magenta", "yellow", "dark red", "dark green", "dark blue"]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
          1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(2):
    if len(labels[i]) == 0:
        self.qtgui_freq_sink_x_0.set_line_label(i, "Data {0}".format(i))
    else:
        self.qtgui_freq_sink_x_0.set_line_label(i, labels[i])
    self.qtgui_freq_sink_x_0.set_line_width(i, widths[i])
    self.qtgui_freq_sink_x_0.set_line_color(i, colors[i])
    self.qtgui_freq_sink_x_0.set_line_alpha(i, alphas[i])
self._qtgui_freq_sink_x_0_win = sip.wrapinstance(self.qtgui_freq_sink_x_0.pyqwidget(), Qt.QWidget)
self.main_tab_grid_layout_0.addWidget(self._qtgui_freq_sink_x_0_win, 0, 0, 4, 4)
for r in range(0, 4):
    self.main_tab_grid_layout_0.setRowStretch(r, 1)
for c in range(0, 4):
    self.main_tab_grid_layout_0.setColumnStretch(c, 1)
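# The three complex freq sinks above monitor successive rate-conversion
# stages: qtgui_freq_sink_x_0 at samp_rate/decim_1*interp_1 (first resampler
# output), qtgui_freq_sink_x_0_0 at the second-stage rate
# samp_rate/(decim_1*decim_2)*(interp_1*interp_2), and qtgui_freq_sink_x_0_0_0
# at the final audio bandwidth, where the RHCP/LHCP traces show the
# recombined circular polarizations.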
self.osmosdr_source_0_0 = osmosdr.source( args="numchan=" + str(1) + " " + "rtl=1,direct_samp=2" )
self.osmosdr_source_0_0.set_sample_rate(samp_rate)
self.osmosdr_source_0_0.set_center_freq(rx_freq-offset, 0)
self.osmosdr_source_0_0.set_freq_corr(0, 0)
self.osmosdr_source_0_0.set_dc_offset_mode(0, 0)
self.osmosdr_source_0_0.set_iq_balance_mode(0, 0)
self.osmosdr_source_0_0.set_gain_mode(False, 0)
self.osmosdr_source_0_0.set_gain(10, 0)
self.osmosdr_source_0_0.set_if_gain(20, 0)
self.osmosdr_source_0_0.set_bb_gain(20, 0)
self.osmosdr_source_0_0.set_antenna('', 0)
self.osmosdr_source_0_0.set_bandwidth(0, 0)
self.osmosdr_source_0 = osmosdr.source( args="numchan=" + str(1) + " " + "rtl=0,direct_samp=2" )
self.osmosdr_source_0.set_sample_rate(samp_rate)
self.osmosdr_source_0.set_center_freq(rx_freq - offset, 0)
self.osmosdr_source_0.set_freq_corr(0, 0)
self.osmosdr_source_0.set_dc_offset_mode(0, 0)
self.osmosdr_source_0.set_iq_balance_mode(0, 0)
self.osmosdr_source_0.set_gain_mode(False, 0)
self.osmosdr_source_0.set_gain(20, 0)
self.osmosdr_source_0.set_if_gain(20, 0)
self.osmosdr_source_0.set_bb_gain(20, 0)
self.osmosdr_source_0.set_antenna('', 0)
self.osmosdr_source_0.set_bandwidth(0, 0)
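# Two separate RTL-SDR dongles are opened by index ("rtl=0" and "rtl=1").
# "direct_samp=2" selects direct-sampling mode on the Q branch, which is how
# RTL-SDR sticks are commonly operated below the tuner's ~24 MHz lower limit;
# the 20.1 MHz Jupiter band falls in that range. The RF gains differ per
# dongle here (20 dB vs 10 dB), presumably to balance the two channels.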
self._lpf_cut_tool_bar = Qt.QToolBar(self)
self._lpf_cut_tool_bar.addWidget(Qt.QLabel('Freq [Hz]'+": "))
self._lpf_cut_line_edit = Qt.QLineEdit(str(self.lpf_cut))
self._lpf_cut_tool_bar.addWidget(self._lpf_cut_line_edit)
self._lpf_cut_line_edit.returnPressed.connect(
    lambda: self.set_lpf_cut(eng_notation.str_to_num(str(self._lpf_cut_line_edit.text().toAscii()))))
self.main_tab_grid_layout_1.addWidget(self._lpf_cut_tool_bar, 8, 0, 1, 2)
for r in range(8, 9):
    self.main_tab_grid_layout_1.setRowStretch(r, 1)
for c in range(0, 2):
    self.main_tab_grid_layout_1.setColumnStretch(c, 1)
self.low_pass_filter_0_1_0 = filter.fir_filter_ccf(int(audio_rate_in/audio_rate_out), firdes.low_pass(
    1, audio_rate_in, audio_lpf_cut, 500, firdes.WIN_HAMMING, 6.76))
self.low_pass_filter_0_1 = filter.fir_filter_ccf(int(audio_rate_in/audio_rate_out), firdes.low_pass(
    1, audio_rate_in, audio_lpf_cut, 500, firdes.WIN_HAMMING, 6.76))
self.blocks_nlog10_ff_0_0_0_0 = blocks.nlog10_ff(10, 1, 0)
self.blocks_nlog10_ff_0_0_0 = blocks.nlog10_ff(10, 1, 0)
self.blocks_nlog10_ff_0_0 = blocks.nlog10_ff(10, 1, 0)
self.blocks_nlog10_ff_0 = blocks.nlog10_ff(10, 1, 0)
self.blocks_multiply_xx_0_0 = blocks.multiply_vcc(1)
self.blocks_multiply_xx_0 = blocks.multiply_vcc(1)
self.blocks_multiply_const_vxx_1_0_1 = blocks.multiply_const_vcc((complex(0,1), ))
self.blocks_multiply_const_vxx_1_0_0_0 = blocks.multiply_const_vcc((complex(0,-1), ))
self.blocks_multiply_const_vxx_0_0 = blocks.multiply_const_vff((vol_right, ))
self.blocks_multiply_const_vxx_0 = blocks.multiply_const_vff((vol_left, ))
self.blocks_complex_to_real_0_0_0 = blocks.complex_to_real(1)
self.blocks_complex_to_real_0_0 = blocks.complex_to_real(1)
self.blocks_complex_to_mag_squared_0_1 = blocks.complex_to_mag_squared(1)
self.blocks_complex_to_mag_squared_0_0_0 = blocks.complex_to_mag_squared(1)
self.blocks_complex_to_mag_squared_0_0 = blocks.complex_to_mag_squared(1)
self.blocks_complex_to_mag_squared_0 = blocks.complex_to_mag_squared(1)
self.blocks_add_xx_0_0 = blocks.add_vcc(1)
self.blocks_add_xx_0 = blocks.add_vcc(1)
self.audio_sink_0 = audio.sink(16000, '', True)
self.analog_sig_source_x_0 = analog.sig_source_c(samp_rate, analog.GR_COS_WAVE, -1 * (offset), 1, 0)
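# Offset tuning: both sources are tuned 'offset' Hz below rx_freq (offset is
# set to samp_rate/4 elsewhere in this file), so the signal of interest sits
# at +offset in baseband. Multiplying by this complex cosine at -offset
# shifts it back down to DC, while the RTL-SDR's own DC spike ends up a
# quarter of the sample rate away from the band of interest.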
##################################################
# Connections
##################################################
self.connect((self.analog_sig_source_x_0, 0), (self.blocks_multiply_xx_0, 1))
self.connect((self.analog_sig_source_x_0, 0), (self.blocks_multiply_xx_0_0, 1))
self.connect((self.blocks_add_xx_0, 0), (self.blocks_complex_to_mag_squared_0_0, 0))
self.connect((self.blocks_add_xx_0, 0), (self.qtgui_freq_sink_x_0_0_0, 0))
self.connect((self.blocks_add_xx_0, 0), (self.qtgui_waterfall_sink_x_0_1_0, 0))
self.connect((self.blocks_add_xx_0_0, 0), (self.blocks_complex_to_mag_squared_0, 0))
self.connect((self.blocks_add_xx_0_0, 0), (self.qtgui_freq_sink_x_0_0_0, 1))
self.connect((self.blocks_add_xx_0_0, 0), (self.qtgui_waterfall_sink_x_0_0_0_0, 0))
self.connect((self.blocks_complex_to_mag_squared_0, 0), (self.single_pole_iir_filter_xx_0, 0))
self.connect((self.blocks_complex_to_mag_squared_0_0, 0), (self.single_pole_iir_filter_xx_0_0, 0))
self.connect((self.blocks_complex_to_mag_squared_0_0_0, 0), (self.single_pole_iir_filter_xx_0_1, 0))
self.connect((self.blocks_complex_to_mag_squared_0_1, 0), (self.single_pole_iir_filter_xx_0_0_0, 0))
self.connect((self.blocks_complex_to_real_0_0, 0), (self.blocks_multiply_const_vxx_0, 0))
self.connect((self.blocks_complex_to_real_0_0_0, 0), (self.blocks_multiply_const_vxx_0_0, 0))
self.connect((self.blocks_multiply_const_vxx_0, 0), (self.audio_sink_0, 0))
self.connect((self.blocks_multiply_const_vxx_0, 0), (self.qtgui_freq_sink_x_1, 0))
self.connect((self.blocks_multiply_const_vxx_0, 0), (self.qtgui_time_sink_x_0_0, 0))
self.connect((self.blocks_multiply_const_vxx_0, 0), (self.qtgui_waterfall_sink_x_1_0, 0))
self.connect((self.blocks_multiply_const_vxx_0_0, 0), (self.audio_sink_0, 1))
self.connect((self.blocks_multiply_const_vxx_0_0, 0), (self.qtgui_freq_sink_x_1, 1))
self.connect((self.blocks_multiply_const_vxx_0_0, 0), (self.qtgui_time_sink_x_0_0, 1))
self.connect((self.blocks_multiply_const_vxx_1_0_0_0, 0), (self.blocks_add_xx_0_0, 1))
self.connect((self.blocks_multiply_const_vxx_1_0_1, 0), (self.blocks_add_xx_0, 1))
self.connect((self.blocks_multiply_xx_0, 0), (self.rational_resampler_xxx_1, 0))
self.connect((self.blocks_multiply_xx_0_0, 0), (self.rational_resampler_xxx_2, 0))
self.connect((self.blocks_nlog10_ff_0, 0), (self.qtgui_number_sink_0, 0))
self.connect((self.blocks_nlog10_ff_0, 0), (self.qtgui_time_sink_x_0, 0))
self.connect((self.blocks_nlog10_ff_0_0, 0), (self.qtgui_number_sink_0, 1))
self.connect((self.blocks_nlog10_ff_0_0, 0), (self.qtgui_time_sink_x_0, 1))
self.connect((self.blocks_nlog10_ff_0_0_0, 0), (self.qtgui_number_sink_0, 3))
self.connect((self.blocks_nlog10_ff_0_0_0, 0), (self.qtgui_time_sink_x_0, 3))
self.connect((self.blocks_nlog10_ff_0_0_0_0, 0), (self.qtgui_number_sink_0, 2))
self.connect((self.blocks_nlog10_ff_0_0_0_0, 0), (self.qtgui_time_sink_x_0, 2))
self.connect((self.low_pass_filter_0_1, 0), (self.blocks_add_xx_0, 0))
self.connect((self.low_pass_filter_0_1, 0), (self.blocks_add_xx_0_0, 0))
self.connect((self.low_pass_filter_0_1, 0), (self.blocks_complex_to_mag_squared_0_0_0, 0))
self.connect((self.low_pass_filter_0_1, 0), (self.blocks_complex_to_real_0_0, 0))
self.connect((self.low_pass_filter_0_1_0, 0), (self.blocks_complex_to_mag_squared_0_1, 0))
self.connect((self.low_pass_filter_0_1_0, 0), (self.blocks_complex_to_real_0_0_0, 0))
self.connect((self.low_pass_filter_0_1_0, 0), (self.blocks_multiply_const_vxx_1_0_0_0, 0))
self.connect((self.low_pass_filter_0_1_0, 0), (self.blocks_multiply_const_vxx_1_0_1, 0))
self.connect((self.osmosdr_source_0, 0), (self.blocks_multiply_xx_0, 0))
self.connect((self.osmosdr_source_0_0, 0), (self.blocks_multiply_xx_0_0, 0))
self.connect((self.rational_resampler_xxx_1, 0), (self.qtgui_freq_sink_x_0, 0))
self.connect((self.rational_resampler_xxx_1, 0), (self.qtgui_waterfall_sink_x_0, 0))
self.connect((self.rational_resampler_xxx_1, 0), (self.rational_resampler_xxx_1_0, 0))
self.connect((self.rational_resampler_xxx_1_0, 0), (self.low_pass_filter_0_1, 0))
self.connect((self.rational_resampler_xxx_1_0, 0), (self.qtgui_freq_sink_x_0_0, 0))
self.connect((self.rational_resampler_xxx_1_0, 0), (self.qtgui_waterfall_sink_x_0_1, 0))
self.connect((self.rational_resampler_xxx_2, 0), (self.qtgui_freq_sink_x_0, 1))
self.connect((self.rational_resampler_xxx_2, 0), (self.qtgui_waterfall_sink_x_0_0, 0))
self.connect((self.rational_resampler_xxx_2, 0), (self.rational_resampler_xxx_2_0, 0))
self.connect((self.rational_resampler_xxx_2_0, 0), (self.low_pass_filter_0_1_0, 0))
self.connect((self.rational_resampler_xxx_2_0, 0), (self.qtgui_freq_sink_x_0_0, 1))
self.connect((self.rational_resampler_xxx_2_0, 0), (self.qtgui_waterfall_sink_x_0_0_0, 0))
self.connect((self.single_pole_iir_filter_xx_0, 0), (self.blocks_nlog10_ff_0, 0))
self.connect((self.single_pole_iir_filter_xx_0_0, 0), (self.blocks_nlog10_ff_0_0, 0))
self.connect((self.single_pole_iir_filter_xx_0_0_0, 0), (self.blocks_nlog10_ff_0_0_0, 0))
self.connect((self.single_pole_iir_filter_xx_0_1, 0), (self.blocks_nlog10_ff_0_0_0_0, 0))
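# Signal path summary (as wired above): each RTL-SDR feeds a mixer (offset
# removal), two cascaded rational resamplers, and a low-pass filter. The two
# filtered dipole channels are then combined as NS + j*EW and NS - j*EW via
# the multiply_const(+/-1j) and add blocks, which, given the crossed-dipole
# antennas, yields the two circular polarizations (RHCP/LHCP). Each of the
# four complex streams (two linear, two circular) goes through
# |x|^2 -> single-pole IIR -> 10*log10 for the power/number displays, while
# the two LPF outputs also feed complex_to_real -> volume scaling -> the
# stereo audio sink.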
def closeEvent(self, event):
self.settings = Qt.QSettings("GNU Radio", "radio_jove_dual_rtlsdr_sigmf_3")
self.settings.setValue("geometry", self.saveGeometry())
event.accept()
def get_path(self):
return self.path
def set_path(self, path):
self.path = path
self.set_fp_wav("{:s}/{:s}".format(self.path, self.fn_wav))
self.set_fp_1("{:s}/{:s}".format(self.path, self.fn_1))
self.set_fp_0("{:s}/{:s}".format(self.path, self.fn_0))
def get_signal_type(self):
return self.signal_type
def set_signal_type(self, signal_type):
self.signal_type = signal_type
def get_ts_str(self):
return self.ts_str
def set_ts_str(self, ts_str):
self.ts_str = ts_str
self.set_fn_wav("{:s}_{:s}_{:s}.wav".format(self.signal_type.upper(), self.pol.upper(), self.ts_str))
self.set_fn_1("{:s}_{:s}_{:s}".format(self.signal_type.upper(), self.antenna_1.upper(), self.ts_str))
self.set_fn_0("{:s}_{:s}_{:s}".format(self.signal_type.upper(), self.antenna_0.upper(), self.ts_str))
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.set_offset(self.samp_rate/4)
self.set_audio_rate_in(self.samp_rate/(self.decim_1*self.decim_2) *(self.interp_1*self.interp_2))
self.qtgui_waterfall_sink_x_0_1.set_frequency_range(0, self.samp_rate/(self.decim_1*self.decim_2) *(self.interp_1*self.interp_2))
self.qtgui_waterfall_sink_x_0_0_0.set_frequency_range(0, self.samp_rate/(self.decim_1*self.decim_2) *(self.interp_1*self.interp_2))
self.qtgui_waterfall_sink_x_0_0.set_frequency_range(self.rx_freq, self.samp_rate/self.decim_1*self.interp_1)
self.qtgui_waterfall_sink_x_0.set_frequency_range(self.rx_freq, self.samp_rate/self.decim_1*self.interp_1)
self.qtgui_time_sink_x_0.set_samp_rate(self.samp_rate)
self.qtgui_freq_sink_x_0_0.set_frequency_range(0, self.samp_rate/(self.decim_1*self.decim_2) *(self.interp_1*self.interp_2))
self.qtgui_freq_sink_x_0.set_frequency_range(self.rx_freq*0, self.samp_rate / self.decim_1 *self.interp_1)
self.osmosdr_source_0_0.set_sample_rate(self.samp_rate)
self.osmosdr_source_0.set_sample_rate(self.samp_rate)
self.analog_sig_source_x_0.set_sampling_freq(self.samp_rate)
def get_pol(self):
return self.pol
def set_pol(self, pol):
self.pol = pol
def get_interp_2(self):
return self.interp_2
def set_interp_2(self, interp_2):
self.interp_2 = interp_2
self.set_audio_rate_in(self.samp_rate/(self.decim_1*self.decim_2) *(self.interp_1*self.interp_2))
self.qtgui_waterfall_sink_x_0_1.set_frequency_range(0, self.samp_rate/(self.decim_1*self.decim_2) *(self.interp_1*self.interp_2))
self.qtgui_waterfall_sink_x_0_0_0.set_frequency_range(0, self.samp_rate/(self.decim_1*self.decim_2) *(self.interp_1*self.interp_2))
self.qtgui_freq_sink_x_0_0.set_frequency_range(0, self.samp_rate/(self.decim_1*self.decim_2) *(self.interp_1*self.interp_2))
def get_interp_1(self):
return self.interp_1
def set_interp_1(self, interp_1):
self.interp_1 = interp_1
self.set_audio_rate_in(self.samp_rate/(self.decim_1*self.decim_2) *(self.interp_1*self.interp_2))
self.qtgui_waterfall_sink_x_0_1.set_frequency_range(0, self.samp_rate/(self.decim_1*self.decim_2) *(self.interp_1*self.interp_2))
self.qtgui_waterfall_sink_x_0_0_0.set_frequency_range(0, self.samp_rate/(self.decim_1*self.decim_2) *(self.interp_1*self.interp_2))
self.qtgui_waterfall_sink_x_0_0.set_frequency_range(self.rx_freq, self.samp_rate/self.decim_1*self.interp_1)
self.qtgui_waterfall_sink_x_0.set_frequency_range(self.rx_freq, self.samp_rate/self.decim_1*self.interp_1)
self.qtgui_freq_sink_x_0_0.set_frequency_range(0, self.samp_rate/(self.decim_1*self.decim_2) *(self.interp_1*self.interp_2))
self.qtgui_freq_sink_x_0.set_frequency_range(self.rx_freq*0, self.samp_rate / self.decim_1 *self.interp_1)
def get_decim_2(self):
return self.decim_2
def set_decim_2(self, decim_2):
self.decim_2 = decim_2
self.set_audio_rate_in(self.samp_rate/(self.decim_1*self.decim_2) *(self.interp_1*self.interp_2))
self.qtgui_waterfall_sink_x_0_1.set_frequency_range(0, self.samp_rate/(self.decim_1*self.decim_2) *(self.interp_1*self.interp_2))
self.qtgui_waterfall_sink_x_0_0_0.set_frequency_range(0, self.samp_rate/(self.decim_1*self.decim_2) *(self.interp_1*self.interp_2))
self.qtgui_freq_sink_x_0_0.set_frequency_range(0, self.samp_rate/(self.decim_1*self.decim_2) *(self.interp_1*self.interp_2))
def get_decim_1(self):
return self.decim_1
def set_decim_1(self, decim_1):
self.decim_1 = decim_1
self.set_audio_rate_in(self.samp_rate/(self.decim_1*self.decim_2) *(self.interp_1*self.interp_2))
self.qtgui_waterfall_sink_x_0_1.set_frequency_range(0, self.samp_rate/(self.decim_1*self.decim_2) *(self.interp_1*self.interp_2))
self.qtgui_waterfall_sink_x_0_0_0.set_frequency_range(0, self.samp_rate/(self.decim_1*self.decim_2) *(self.interp_1*self.interp_2))
self.qtgui_waterfall_sink_x_0_0.set_frequency_range(self.rx_freq, self.samp_rate/self.decim_1*self.interp_1)
self.qtgui_waterfall_sink_x_0.set_frequency_range(self.rx_freq, self.samp_rate/self.decim_1*self.interp_1)
self.qtgui_freq_sink_x_0_0.set_frequency_range(0, self.samp_rate/(self.decim_1*self.decim_2) *(self.interp_1*self.interp_2))
self.qtgui_freq_sink_x_0.set_frequency_range(self.rx_freq*0, self.samp_rate / self.decim_1 *self.interp_1)
def get_antenna_1(self):
return self.antenna_1
def set_antenna_1(self, antenna_1):
self.antenna_1 = antenna_1
def get_antenna_0(self):
return self.antenna_0
def set_antenna_0(self, antenna_0):
self.antenna_0 = antenna_0
def get_phase_shift_rad(self):
return self.phase_shift_rad
def set_phase_shift_rad(self, phase_shift_rad):
self.phase_shift_rad = phase_shift_rad
self.set_phase_shift_complex(complex(math.cos(self.phase_shift_rad),math.sin(self.phase_shift_rad)))
def get_fn_wav(self):
return self.fn_wav
def set_fn_wav(self, fn_wav):
self.fn_wav = fn_wav
self.set_fp_wav("{:s}/{:s}".format(self.path, self.fn_wav))
def get_fn_1(self):
return self.fn_1
def set_fn_1(self, fn_1):
self.fn_1 = fn_1
self.set_fp_1("{:s}/{:s}".format(self.path, self.fn_1))
def get_fn_0(self):
return self.fn_0
def set_fn_0(self, fn_0):
self.fn_0 = fn_0
self.set_fp_0("{:s}/{:s}".format(self.path, self.fn_0))
def get_audio_rate_in(self):
return self.audio_rate_in
def set_audio_rate_in(self, audio_rate_in):
self.audio_rate_in = audio_rate_in
self.set_audio_rate_out(self.audio_rate_in/3)
self.low_pass_filter_0_1_0.set_taps(firdes.low_pass(1, self.audio_rate_in, self.audio_lpf_cut, 500, firdes.WIN_HAMMING, 6.76))
self.low_pass_filter_0_1.set_taps(firdes.low_pass(1, self.audio_rate_in, self.audio_lpf_cut, 500, firdes.WIN_HAMMING, 6.76))
def get_vol_right(self):
return self.vol_right
def set_vol_right(self, vol_right):
self.vol_right = vol_right
self.blocks_multiply_const_vxx_0_0.set_k((self.vol_right, ))
def get_vol_left(self):
return self.vol_left
def set_vol_left(self, vol_left):
self.vol_left = vol_left
self.blocks_multiply_const_vxx_0.set_k((self.vol_left, ))
def get_rx_freq(self):
return self.rx_freq
def set_rx_freq(self, rx_freq):
self.rx_freq = rx_freq
Qt.QMetaObject.invokeMethod(self._rx_freq_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.rx_freq)))
self.qtgui_waterfall_sink_x_0_0.set_frequency_range(self.rx_freq, self.samp_rate/self.decim_1*self.interp_1)
self.qtgui_waterfall_sink_x_0.set_frequency_range(self.rx_freq, self.samp_rate/self.decim_1*self.interp_1)
self.qtgui_freq_sink_x_0.set_frequency_range(self.rx_freq*0, self.samp_rate / self.decim_1 *self.interp_1)
self.osmosdr_source_0_0.set_center_freq(self.rx_freq-self.offset, 0)
self.osmosdr_source_0.set_center_freq(self.rx_freq - self.offset, 0)
def get_phase_shift_complex(self):
return self.phase_shift_complex
def set_phase_shift_complex(self, phase_shift_complex):
self.phase_shift_complex = phase_shift_complex
def get_offset(self):
return self.offset
def set_offset(self, offset):
self.offset = offset
self.osmosdr_source_0_0.set_center_freq(self.rx_freq-self.offset, 0)
self.osmosdr_source_0.set_center_freq(self.rx_freq - self.offset, 0)
self.analog_sig_source_x_0.set_frequency(-1 * (self.offset))
def get_lpf_cut(self):
return self.lpf_cut
def set_lpf_cut(self, lpf_cut):
self.lpf_cut = lpf_cut
Qt.QMetaObject.invokeMethod(self._lpf_cut_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.lpf_cut)))
def get_iir_alpha(self):
return self.iir_alpha
def set_iir_alpha(self, iir_alpha):
self.iir_alpha = iir_alpha
self.single_pole_iir_filter_xx_0_1.set_taps(self.iir_alpha)
self.single_pole_iir_filter_xx_0_0_0.set_taps(self.iir_alpha)
self.single_pole_iir_filter_xx_0_0.set_taps(self.iir_alpha)
self.single_pole_iir_filter_xx_0.set_taps(self.iir_alpha)
def get_fp_wav(self):
return self.fp_wav
def set_fp_wav(self, fp_wav):
self.fp_wav = fp_wav
def get_fp_1(self):
return self.fp_1
def set_fp_1(self, fp_1):
self.fp_1 = fp_1
def get_fp_0(self):
return self.fp_0
def set_fp_0(self, fp_0):
self.fp_0 = fp_0
def get_audio_rate_out(self):
return self.audio_rate_out
def set_audio_rate_out(self, audio_rate_out):
self.audio_rate_out = audio_rate_out
self.qtgui_waterfall_sink_x_1_0.set_frequency_range(0, self.audio_rate_out)
self.qtgui_waterfall_sink_x_0_1_0.set_frequency_range(0, self.audio_rate_out)
self.qtgui_waterfall_sink_x_0_0_0_0.set_frequency_range(0, self.audio_rate_out)
self.qtgui_time_sink_x_0_0.set_samp_rate(self.audio_rate_out)
self.qtgui_freq_sink_x_1.set_frequency_range(0, self.audio_rate_out)
self.qtgui_freq_sink_x_0_0_0.set_frequency_range(0, self.audio_rate_out)
def get_audio_lpf_cut(self):
return self.audio_lpf_cut
def set_audio_lpf_cut(self, audio_lpf_cut):
self.audio_lpf_cut = audio_lpf_cut
Qt.QMetaObject.invokeMethod(self._audio_lpf_cut_line_edit, "setText", Qt.Q_ARG("QString", eng_notation.num_to_str(self.audio_lpf_cut)))
self.low_pass_filter_0_1_0.set_taps(firdes.low_pass(1, self.audio_rate_in, self.audio_lpf_cut, 500, firdes.WIN_HAMMING, 6.76))
self.low_pass_filter_0_1.set_taps(firdes.low_pass(1, self.audio_rate_in, self.audio_lpf_cut, 500, firdes.WIN_HAMMING, 6.76))
def argument_parser():
    description = 'Receive Jupiter Emissions with RTL-SDR on 20.1MHz'
    parser = OptionParser(usage="%prog: [options]", option_class=eng_option, description=description)
    parser.add_option(
        "", "--path", dest="path", type="string", default="/captures/radio_jove",
        help="Set path [default=%default]")
    parser.add_option(
        "", "--signal-type", dest="signal_type", type="string", default='RADIO-JOVE',
        help="Set signal_type [default=%default]")
    return parser


def main(top_block_cls=radio_jove_dual_rtlsdr_sigmf_3, options=None):
    if options is None:
        options, _ = argument_parser().parse_args()

    from distutils.version import StrictVersion
    if StrictVersion(Qt.qVersion()) >= StrictVersion("4.5.0"):
        style = gr.prefs().get_string('qtgui', 'style', 'raster')
        Qt.QApplication.setGraphicsSystem(style)
    qapp = Qt.QApplication(sys.argv)

    tb = top_block_cls(path=options.path, signal_type=options.signal_type)
    tb.start()
    tb.show()

    def quitting():
        tb.stop()
        tb.wait()
    qapp.connect(qapp, Qt.SIGNAL("aboutToQuit()"), quitting)
    qapp.exec_()


if __name__ == '__main__':
    main()
4ae0be5540abf38b34cb41045d078856bdad5cd8 | 44 bytes | py | Python | modulepackage/trial_import/simplefoo.py | Chyi341152/chyi-book @ ddeaf49d69a68f5718c20c3b7fe6fd37381d21eb | ["MIT"]
# simplefoo.py
print('imported simplefoo')
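# Importing this module triggers the print above exactly once per process:
# Python caches imported modules in sys.modules, so repeated imports reuse
# the cached module object instead of re-executing the file.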
4af5fdc36257d3ecae3590d45602b993ea6fc8a4 | 110 bytes | py | Python | app/back/mongo/data/collect/indicators/__init__.py | jgphilpott/polyplot @ c46861174ee5881dadffbfb2278d555462523547 | ["MIT"] | 5 stars (2021-05-17 to 2021-12-14) | 8 issues (jgphilpott/iGraph) | 2 forks
from back.mongo.data.collect.indicators.model import *
from back.mongo.data.collect.indicators.mongo import *
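# Wildcard imports in a package __init__ re-export the public names of the
# model and mongo submodules at package level, so callers can presumably
# import directly from back.mongo.data.collect.indicators without knowing
# which submodule defines a given name (at the cost of a flat, implicit
# namespace).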
ab2b5a58764d1a37b2312c19a41eda3a0c929a21 | 10,008 bytes | py | Python | official/vision/beta/evaluation/segmentation_metrics.py | youting83/models @ ea959d1aa8dd8c8a71770dcaba2961a4c07184e4 | ["Apache-2.0"] | 2 stars (2021-11-03 to 2021-11-09) | 1 fork (2021-10-03)
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metrics for segmentation."""
import tensorflow as tf
from official.vision import keras_cv
class MeanIoU(tf.keras.metrics.MeanIoU):
"""Mean IoU metric for semantic segmentation.
This class utilizes tf.keras.metrics.MeanIoU to perform batched mean iou when
both input images and groundtruth masks are resized to the same size
(rescale_predictions=False). It also computes mean iou on groundtruth original
sizes, in which case, each prediction is rescaled back to the original image
size.
"""
def __init__(
self, num_classes, rescale_predictions=False, name=None, dtype=None):
"""Constructs Segmentation evaluator class.
Args:
num_classes: `int`, number of classes.
rescale_predictions: `bool`, whether to scale back prediction to original
image sizes. If True, y_true['image_info'] is used to rescale
predictions.
name: `str`, name of the metric instance.
dtype: data type of the metric result.
"""
self._rescale_predictions = rescale_predictions
super(MeanIoU, self).__init__(
num_classes=num_classes, name=name, dtype=dtype)
def update_state(self, y_true, y_pred):
"""Updates metric state.
Args:
y_true: `dict`, dictionary with the following name, and key values.
- masks: [batch, width, height, 1], groundtruth masks.
- valid_masks: [batch, width, height, 1], valid elements in the mask.
- image_info: [batch, 4, 2], a tensor that holds information about
original and preprocessed images. Each entry is in the format of
[[original_height, original_width], [input_height, input_width],
[y_scale, x_scale], [y_offset, x_offset]], where [desired_height,
desired_width] is the actual scaled image size, and [y_scale, x_scale]
is the scaling factor, which is the ratio of scaled dimension /
original dimension.
y_pred: Tensor [batch, width_p, height_p, num_classes], predicted masks.
"""
predictions = y_pred
masks = y_true['masks']
valid_masks = y_true['valid_masks']
images_info = y_true['image_info']

if isinstance(predictions, tuple) or isinstance(predictions, list):
  predictions = tf.concat(predictions, axis=0)
  masks = tf.concat(masks, axis=0)
  valid_masks = tf.concat(valid_masks, axis=0)
  images_info = tf.concat(images_info, axis=0)

# Ignored mask elements are set to zero for the argmax op.
masks = tf.where(valid_masks, masks, tf.zeros_like(masks))

if self._rescale_predictions:
  # This part can only run on CPU/GPU due to dynamic image resizing.
  for i in range(tf.shape(predictions)[0]):
    mask = masks[i]
    valid_mask = valid_masks[i]
    predicted_mask = predictions[i]
    image_info = images_info[i]
    rescale_size = tf.cast(
        tf.math.ceil(image_info[1, :] / image_info[2, :]), tf.int32)
    image_shape = tf.cast(image_info[0, :], tf.int32)
    offsets = tf.cast(image_info[3, :], tf.int32)
    predicted_mask = tf.image.resize(
        predicted_mask,
        rescale_size,
        method=tf.image.ResizeMethod.BILINEAR)
    predicted_mask = tf.image.crop_to_bounding_box(predicted_mask,
                                                   offsets[0], offsets[1],
                                                   image_shape[0],
                                                   image_shape[1])
    mask = tf.image.crop_to_bounding_box(mask, 0, 0, image_shape[0],
                                         image_shape[1])
    valid_mask = tf.image.crop_to_bounding_box(valid_mask, 0, 0,
                                               image_shape[0],
                                               image_shape[1])
    predicted_mask = tf.argmax(predicted_mask, axis=2)
    flatten_predictions = tf.reshape(predicted_mask, shape=[1, -1])
    flatten_masks = tf.reshape(mask, shape=[1, -1])
    flatten_valid_masks = tf.reshape(valid_mask, shape=[1, -1])
    super(MeanIoU, self).update_state(
        flatten_masks, flatten_predictions,
        tf.cast(flatten_valid_masks, tf.float32))
else:
  predictions = tf.image.resize(
      predictions,
      tf.shape(masks)[1:3],
      method=tf.image.ResizeMethod.BILINEAR)
  predictions = tf.argmax(predictions, axis=3)
  flatten_predictions = tf.reshape(predictions, shape=[-1])
  flatten_masks = tf.reshape(masks, shape=[-1])
  flatten_valid_masks = tf.reshape(valid_masks, shape=[-1])
  super(MeanIoU, self).update_state(
      flatten_masks, flatten_predictions,
      tf.cast(flatten_valid_masks, tf.float32))
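# Note on the rescale path above: image_info[1, :] / image_info[2, :] undoes
# the preprocessing scale factor, the bilinear resize brings each prediction
# back to that size, and crop_to_bounding_box then cuts out the original
# image region (offsets plus original height/width) before the IoU update.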
class PerClassIoU(keras_cv.metrics.PerClassIoU):
"""Per Class IoU metric for semantic segmentation.
This class utilizes keras_cv.metrics.PerClassIoU to perform batched per class
iou when both input images and groundtruth masks are resized to the same size
(rescale_predictions=False). It also computes per class iou on groundtruth
original sizes, in which case, each prediction is rescaled back to the
original image size.
"""
def __init__(
self, num_classes, rescale_predictions=False, name=None, dtype=None):
"""Constructs Segmentation evaluator class.
Args:
num_classes: `int`, number of classes.
rescale_predictions: `bool`, whether to scale back prediction to original
image sizes. If True, y_true['image_info'] is used to rescale
predictions.
name: `str`, name of the metric instance.
dtype: data type of the metric result.
"""
self._rescale_predictions = rescale_predictions
super(PerClassIoU, self).__init__(
num_classes=num_classes, name=name, dtype=dtype)
def update_state(self, y_true, y_pred):
"""Updates metric state.
Args:
y_true: `dict`, dictionary with the following name, and key values.
- masks: [batch, width, height, 1], groundtruth masks.
- valid_masks: [batch, width, height, 1], valid elements in the mask.
- image_info: [batch, 4, 2], a tensor that holds information about
original and preprocessed images. Each entry is in the format of
[[original_height, original_width], [input_height, input_width],
[y_scale, x_scale], [y_offset, x_offset]], where [desired_height,
desired_width] is the actual scaled image size, and [y_scale, x_scale]
is the scaling factor, which is the ratio of scaled dimension /
original dimension.
y_pred: Tensor [batch, width_p, height_p, num_classes], predicted masks.
"""
predictions = y_pred
masks = y_true['masks']
valid_masks = y_true['valid_masks']
images_info = y_true['image_info']

if isinstance(predictions, tuple) or isinstance(predictions, list):
  predictions = tf.concat(predictions, axis=0)
  masks = tf.concat(masks, axis=0)
  valid_masks = tf.concat(valid_masks, axis=0)
  images_info = tf.concat(images_info, axis=0)

# Ignored mask elements are set to zero for the argmax op.
masks = tf.where(valid_masks, masks, tf.zeros_like(masks))

if self._rescale_predictions:
  # This part can only run on CPU/GPU due to dynamic image resizing.
  for i in range(tf.shape(predictions)[0]):
    mask = masks[i]
    valid_mask = valid_masks[i]
    predicted_mask = predictions[i]
    image_info = images_info[i]
    rescale_size = tf.cast(
        tf.math.ceil(image_info[1, :] / image_info[2, :]), tf.int32)
    image_shape = tf.cast(image_info[0, :], tf.int32)
    offsets = tf.cast(image_info[3, :], tf.int32)
    predicted_mask = tf.image.resize(
        predicted_mask,
        rescale_size,
        method=tf.image.ResizeMethod.BILINEAR)
    predicted_mask = tf.image.crop_to_bounding_box(predicted_mask,
                                                   offsets[0], offsets[1],
                                                   image_shape[0],
                                                   image_shape[1])
    mask = tf.image.crop_to_bounding_box(mask, 0, 0, image_shape[0],
                                         image_shape[1])
    valid_mask = tf.image.crop_to_bounding_box(valid_mask, 0, 0,
                                               image_shape[0],
                                               image_shape[1])
    predicted_mask = tf.argmax(predicted_mask, axis=2)
    flatten_predictions = tf.reshape(predicted_mask, shape=[1, -1])
    flatten_masks = tf.reshape(mask, shape=[1, -1])
    flatten_valid_masks = tf.reshape(valid_mask, shape=[1, -1])
    super(PerClassIoU, self).update_state(
        flatten_masks, flatten_predictions,
        tf.cast(flatten_valid_masks, tf.float32))
else:
  predictions = tf.image.resize(
      predictions,
      tf.shape(masks)[1:3],
      method=tf.image.ResizeMethod.BILINEAR)
  predictions = tf.argmax(predictions, axis=3)
  flatten_predictions = tf.reshape(predictions, shape=[-1])
  flatten_masks = tf.reshape(masks, shape=[-1])
  flatten_valid_masks = tf.reshape(valid_masks, shape=[-1])
  super(PerClassIoU, self).update_state(
      flatten_masks, flatten_predictions,
      tf.cast(flatten_valid_masks, tf.float32))
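# A minimal usage sketch (hypothetical shapes, not from the original file):
#
#   metric = MeanIoU(num_classes=21, rescale_predictions=False)
#   y_true = {'masks': masks,            # [batch, H, W, 1]
#             'valid_masks': valid,      # [batch, H, W, 1], bool
#             'image_info': info}        # [batch, 4, 2]
#   metric.update_state(y_true, logits)  # logits: [batch, h, w, 21]
#   print(metric.result().numpy())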
ab4478177b7e4c3038a8dab02d6e538d63b6cea7 | 27,882 bytes | py | Python | landlab/grid/raster_mappers.py | scottrdavid/landlab @ bb8414df55b4e5fb9198468fadbe3c725ef60601 | ["MIT"] | 257 stars (2015-01-13 to 2022-03-29) | 1,222 issues | 274 forks
#! /usr/bin/env python
"""Grid element mappers that are specific to raster grids.
Mapping functions unique to raster grids
++++++++++++++++++++++++++++++++++++++++
.. autosummary::
~landlab.grid.raster_mappers.map_sum_of_inlinks_to_node
~landlab.grid.raster_mappers.map_mean_of_inlinks_to_node
~landlab.grid.raster_mappers.map_max_of_inlinks_to_node
~landlab.grid.raster_mappers.map_min_of_inlinks_to_node
~landlab.grid.raster_mappers.map_sum_of_outlinks_to_node
~landlab.grid.raster_mappers.map_mean_of_outlinks_to_node
~landlab.grid.raster_mappers.map_max_of_outlinks_to_node
~landlab.grid.raster_mappers.map_min_of_outlinks_to_node
~landlab.grid.raster_mappers.map_mean_of_links_to_node
~landlab.grid.raster_mappers.map_mean_of_horizontal_links_to_node
~landlab.grid.raster_mappers.map_mean_of_horizontal_active_links_to_node
~landlab.grid.raster_mappers.map_mean_of_vertical_links_to_node
~landlab.grid.raster_mappers.map_mean_of_vertical_active_links_to_node
"""
import numpy as np
def _node_out_link_ids(shape):
"""Links leaving each node.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
Returns
-------
tuple :
Tuple of array of link IDs as (vertical_links, horizontal_links).
Examples
--------
>>> from landlab.grid.raster_mappers import _node_out_link_ids
>>> (vert, horiz) = _node_out_link_ids((3, 4))
>>> vert
array([[ 3, 4, 5, 6],
[10, 11, 12, 13],
[-1, -1, -1, -1]])
>>> horiz
array([[ 0, 1, 2, -1],
[ 7, 8, 9, -1],
[14, 15, 16, -1]])
"""
from ..graph.structured_quad.structured_quad import StructuredQuadGraphTopology
layout = StructuredQuadGraphTopology(shape)
node_horizontal_link_ids = np.empty(shape, int)
node_horizontal_link_ids[:, :-1] = layout.horizontal_links.reshape(
(shape[0], shape[1] - 1)
)
node_horizontal_link_ids[:, -1] = -1
node_vertical_link_ids = np.empty(shape, int)
node_vertical_link_ids[:-1, :] = layout.vertical_links.reshape(
(shape[0] - 1, shape[1])
)
node_vertical_link_ids[-1, :] = -1
return node_vertical_link_ids, node_horizontal_link_ids
def _node_in_link_ids(shape):
"""Links entering each node.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
Returns
-------
tuple :
Tuple of array of link IDs as (vertical_links, horizontal_links).
Examples
--------
>>> from landlab.grid.raster_mappers import _node_in_link_ids
>>> (vert, horiz) = _node_in_link_ids((3, 4))
>>> vert
array([[-1, -1, -1, -1],
[ 3, 4, 5, 6],
[10, 11, 12, 13]])
>>> horiz
array([[-1, 0, 1, 2],
[-1, 7, 8, 9],
[-1, 14, 15, 16]])
"""
from ..graph.structured_quad.structured_quad import StructuredQuadGraphTopology
layout = StructuredQuadGraphTopology(shape)
node_horizontal_link_ids = np.empty(shape, int)
node_horizontal_link_ids[:, 1:] = layout.horizontal_links.reshape(
(shape[0], shape[1] - 1)
)
node_horizontal_link_ids[:, 0] = -1
node_vertical_link_ids = np.empty(shape, int)
node_vertical_link_ids[1:, :] = layout.vertical_links.reshape(
(shape[0] - 1, shape[1])
)
node_vertical_link_ids[0, :] = -1
return node_vertical_link_ids, node_horizontal_link_ids
def _number_of_links_per_node(shape):
"""Number of links touching each node.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
Returns
-------
ndarray :
Array of number of links per node.
Examples
--------
>>> from landlab.grid.raster_mappers import _number_of_links_per_node
>>> _number_of_links_per_node((3, 4))
array([[2, 3, 3, 2],
[3, 4, 4, 3],
[2, 3, 3, 2]])
"""
from ..graph.structured_quad.structured_quad import StructuredQuadGraphTopology
layout = StructuredQuadGraphTopology(shape)
n_links_at_node = np.full(shape[0] * shape[1], 4, int)
n_links_at_node[layout.perimeter_nodes] = 3
n_links_at_node[layout.corner_nodes] = 2
return n_links_at_node.reshape(shape)
def map_sum_of_inlinks_to_node(grid, var_name, out=None):
"""Map the sum of links entering a node to the node.
map_sum_of_inlinks_to_node takes an array *at the links* and finds the
inlink values for each node in the grid. It sums the inlinks and returns
values at the nodes.
.. note::
This considers all inactive links to have a value of 0.
Parameters
----------
grid : ModelGrid
A landlab ModelGrid.
var_name : array or field name
Values defined at links.
out : ndarray, optional
Buffer to place mapped values into or `None` to create a new array.
Returns
-------
ndarray
Mapped values at nodes.
Examples
--------
>>> import numpy as np
>>> from landlab.grid.raster_mappers import map_sum_of_inlinks_to_node
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> _ = rmg.add_field("z", np.arange(17.), at="link")
>>> map_sum_of_inlinks_to_node(rmg, 'z')
array([ 0., 0., 1., 2., 3., 11., 13., 15., 10., 25., 27.,
29.])
LLCATS: NINF LINF MAP
"""
if out is None:
out = grid.empty(centering="node")
if type(var_name) is str:
values_at_links = grid.at_link[var_name]
else:
values_at_links = var_name
values_at_links = np.append(values_at_links, 0)
south, west = _node_in_link_ids(grid.shape)
south, west = south.reshape(south.size), west.reshape(west.size)
out[:] = values_at_links[south] + values_at_links[west]
return out
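# Implementation note: _node_in_link_ids() marks missing links with -1, and
# np.append(values_at_links, 0) adds a trailing zero so that indexing with
# -1 (Python's last-element index) reads that zero -- this is what makes
# "inactive/absent links count as 0" work without any explicit masking.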
def map_mean_of_inlinks_to_node(grid, var_name, out=None):
"""Map the mean of links entering a node to the node.
map_mean_of_inlinks_to_node takes an array *at the links* and finds the
inlink values for each node in the grid. It finds the average of
the inlinks and returns values at the nodes.
This considers all inactive links to have a value of 0.
Parameters
----------
grid : ModelGrid
A landlab ModelGrid.
var_name : array or field name
Values defined at links.
out : ndarray, optional
Buffer to place mapped values into or `None` to create a new array.
Returns
-------
ndarray
Mapped values at nodes.
Examples
--------
>>> import numpy as np
>>> from landlab.grid.raster_mappers import map_mean_of_inlinks_to_node
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> _ = rmg.add_field("z", np.arange(17.), at="link")
>>> map_mean_of_inlinks_to_node(rmg, 'z')
array([ 0. , 0. , 0.5, 1. , 1.5, 5.5, 6.5, 7.5, 5. ,
12.5, 13.5, 14.5])
LLCATS: NINF LINF MAP
"""
if out is None:
out = grid.empty(centering="node")
if type(var_name) is str:
values_at_links = grid.at_link[var_name]
else:
values_at_links = var_name
values_at_links = np.append(values_at_links, 0)
south, west = _node_in_link_ids(grid.shape)
south, west = south.reshape(south.size), west.reshape(west.size)
out[:] = 0.5 * (values_at_links[south] + values_at_links[west])
return out
def map_max_of_inlinks_to_node(grid, var_name, out=None):
"""Map the maximum of links entering a node to the node.
map_max_of_inlinks_to_node takes an array *at the links* and finds the
inlink values for each node in the grid. It finds the maximum value at
the inlinks and returns values at the nodes.
.. note::
This considers all inactive links to have a value of 0.
Parameters
----------
grid : ModelGrid
A landlab ModelGrid.
var_name : array or field name
Values defined at links.
out : ndarray, optional
Buffer to place mapped values into or `None` to create a new array.
Returns
-------
ndarray
Mapped values at nodes.
Examples
--------
>>> import numpy as np
>>> from landlab.grid.raster_mappers import map_max_of_inlinks_to_node
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> _ = rmg.add_field("z", np.arange(17.), at="link")
>>> map_max_of_inlinks_to_node(rmg, 'z')
array([ 0., 0., 1., 2.,
3., 7., 8., 9.,
10., 14., 15., 16.])
LLCATS: NINF LINF MAP
"""
if out is None:
out = grid.empty(centering="node")
if type(var_name) is str:
values_at_links = grid.at_link[var_name]
else:
values_at_links = var_name
values_at_links = np.append(values_at_links, 0)
south, west = _node_in_link_ids(grid.shape)
south, west = south.reshape(south.size), west.reshape(west.size)
out[:] = np.maximum(values_at_links[south], values_at_links[west])
return out
def map_min_of_inlinks_to_node(grid, var_name, out=None):
"""Map the minimum of links entering a node to the node.
map_min_of_inlinks_to_node takes an array *at the links* and finds the
inlink values for each node in the grid. It finds the minimum value at
the inlinks and returns values at the nodes.
.. note::
This considers all inactive links to have a value of 0.
Parameters
----------
grid : ModelGrid
A landlab ModelGrid.
var_name : array or field name
Values defined at links.
out : ndarray, optional
Buffer to place mapped values into or `None` to create a new array.
Returns
-------
ndarray
Mapped values at nodes.
Examples
--------
>>> import numpy as np
>>> from landlab.grid.raster_mappers import map_min_of_inlinks_to_node
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> _ = rmg.add_field("z", np.arange(17.), at="link")
>>> map_min_of_inlinks_to_node(rmg, 'z')
array([ 0., 0., 0., 0., 0., 4., 5., 6., 0., 11., 12.,
13.])
LLCATS: NINF LINF MAP
"""
if out is None:
out = grid.empty(centering="node")
if type(var_name) is str:
values_at_links = grid.at_link[var_name]
else:
values_at_links = var_name
values_at_links = np.append(values_at_links, 0)
south, west = _node_in_link_ids(grid.shape)
south, west = south.reshape(south.size), west.reshape(west.size)
out[:] = np.minimum(values_at_links[south], values_at_links[west])
return out
def map_sum_of_outlinks_to_node(grid, var_name, out=None):
"""Map the sum of links leaving a node to the node.
map_sum_of_outlinks_to_node takes an array *at the links* and finds the
outlink values for each node in the grid. It sums the outlinks and returns
values at the nodes.
.. note::
This considers all inactive links to have a value of 0.
Parameters
----------
grid : ModelGrid
A landlab ModelGrid.
var_name : array or field name
Values defined at links.
out : ndarray, optional
Buffer to place mapped values into or `None` to create a new array.
Returns
-------
ndarray
Mapped values at nodes.
Examples
--------
>>> import numpy as np
>>> from landlab.grid.raster_mappers import map_sum_of_outlinks_to_node
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> _ = rmg.add_field("z", np.arange(17.), at="link")
>>> map_sum_of_outlinks_to_node(rmg, 'z')
array([ 3., 5., 7., 6., 17., 19., 21., 13., 14., 15., 16.,
0.])
LLCATS: NINF LINF MAP
"""
if out is None:
out = grid.empty(centering="node")
if type(var_name) is str:
values_at_links = grid.at_link[var_name]
else:
values_at_links = var_name
values_at_links = np.append(values_at_links, 0)
north, east = _node_out_link_ids(grid.shape)
north, east = north.reshape(north.size), east.reshape(east.size)
out[:] = values_at_links[north] + values_at_links[east]
return out
def map_mean_of_outlinks_to_node(grid, var_name, out=None):
"""Map the mean of links leaving a node to the node.
map_mean_of_outlinks_to_node takes an array *at the links* and finds the
outlink values for each node in the grid. It finds the average of
the outlinks and returns values at the nodes.
.. note::
This considers all inactive links to have a value of 0.
Parameters
----------
grid : ModelGrid
A landlab ModelGrid.
var_name : array or field name
Values defined at links.
out : ndarray, optional
Buffer to place mapped values into or `None` to create a new array.
Returns
-------
ndarray
Mapped values at nodes.
Examples
--------
>>> import numpy as np
>>> from landlab.grid.raster_mappers import map_mean_of_outlinks_to_node
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> _ = rmg.add_field("z", np.arange(17.), at="link")
>>> map_mean_of_outlinks_to_node(rmg, 'z')
array([ 1.5, 2.5, 3.5, 3. , 8.5, 9.5, 10.5, 6.5, 7. ,
7.5, 8. , 0. ])
LLCATS: NINF LINF MAP
"""
if out is None:
out = grid.empty(centering="node")
if type(var_name) is str:
values_at_links = grid.at_link[var_name]
else:
values_at_links = var_name
values_at_links = np.append(values_at_links, 0)
north, east = _node_out_link_ids(grid.shape)
north, east = north.reshape(north.size), east.reshape(east.size)
out[:] = 0.5 * (values_at_links[north] + values_at_links[east])
return out
def map_max_of_outlinks_to_node(grid, var_name, out=None):
"""Map the max of links leaving a node to the node.
map_max_of_outlinks_to_node takes an array *at the links* and finds the
outlink values for each node in the grid. It finds the maximum value at
the outlinks and returns values at the nodes.
.. note::
This considers all inactive links to have a value of 0.
Parameters
----------
grid : ModelGrid
A landlab ModelGrid.
var_name : array or field name
Values defined at links.
out : ndarray, optional
Buffer to place mapped values into or `None` to create a new array.
Returns
-------
ndarray
Mapped values at nodes.
Examples
--------
>>> import numpy as np
>>> from landlab.grid.raster_mappers import map_max_of_outlinks_to_node
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> _ = rmg.add_field("z", np.arange(17.), at="link")
>>> map_max_of_outlinks_to_node(rmg, 'z')
array([ 3., 4., 5., 6., 10., 11., 12., 13., 14., 15., 16.,
0.])
LLCATS: NINF LINF MAP
"""
if out is None:
out = grid.empty(centering="node")
if type(var_name) is str:
values_at_links = grid.at_link[var_name]
else:
values_at_links = var_name
values_at_links = np.append(values_at_links, 0)
north, east = _node_out_link_ids(grid.shape)
north, east = north.reshape(north.size), east.reshape(east.size)
np.maximum(values_at_links[north], values_at_links[east], out=out)
return out
def map_min_of_outlinks_to_node(grid, var_name, out=None):
"""Map the min of links leaving a node to the node.
map_min_of_outlinks_to_node takes an array *at the links* and finds the
outlink values for each node in the grid. It finds the minimum value at
the outlinks and returns values at the nodes.
.. note::
This considers all inactive links to have a value of 0.
Parameters
----------
grid : ModelGrid
A landlab ModelGrid.
var_name : array or field name
Values defined at links.
out : ndarray, optional
Buffer to place mapped values into or `None` to create a new array.
Returns
-------
ndarray
Mapped values at nodes.
Examples
--------
>>> import numpy as np
>>> from landlab.grid.raster_mappers import map_min_of_outlinks_to_node
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> _ = rmg.add_field("z", np.arange(17.), at="link")
>>> map_min_of_outlinks_to_node(rmg, 'z')
array([ 0., 1., 2., 0., 7., 8., 9., 0., 0., 0., 0., 0.])
LLCATS: NINF LINF MAP
"""
if out is None:
out = grid.empty(centering="node")
if type(var_name) is str:
values_at_links = grid.at_link[var_name]
else:
values_at_links = var_name
values_at_links = np.append(values_at_links, 0)
north, east = _node_out_link_ids(grid.shape)
north, east = north.reshape(north.size), east.reshape(east.size)
np.minimum(values_at_links[north], values_at_links[east], out=out)
return out
def map_mean_of_links_to_node(grid, var_name, out=None):
"""Map the mean of links touching a node to the node.
map_mean_of_links_to_node takes an array *at the links* and finds the
average of all ~existing~ link neighbor values for each node in the grid.
It returns values at the nodes.
.. note::
This considers all inactive links to have a value of 0.
Parameters
----------
grid : ModelGrid
A landlab ModelGrid.
var_name : array or field name
Values defined at links.
out : ndarray, optional
Buffer to place mapped values into or `None` to create a new array.
Returns
-------
ndarray
Mapped values at nodes.
Examples
--------
>>> import numpy as np
>>> from landlab.grid.raster_mappers import map_mean_of_links_to_node
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> _ = rmg.add_field("z", np.arange(17.), at="link")
>>> map_mean_of_links_to_node(rmg, 'z')
array([ 1.5 , 1.66666667, 2.66666667, 4. ,
6.66666667, 7.5 , 8.5 , 9.33333333,
12. , 13.33333333, 14.33333333, 14.5 ])
LLCATS: NINF LINF MAP
"""
if out is None:
out = grid.empty(centering="node")
if type(var_name) is str:
values_at_links = grid.at_link[var_name]
else:
values_at_links = var_name
values_at_links = np.append(values_at_links, 0)
north, east = _node_out_link_ids(grid.shape)
north, east = north.reshape(north.size), east.reshape(east.size)
south, west = _node_in_link_ids(grid.shape)
south, west = south.reshape(south.size), west.reshape(west.size)
number_of_links = _number_of_links_per_node(grid.shape)
number_of_links = number_of_links.reshape(number_of_links.size)
number_of_links = number_of_links.astype(float, copy=False)
out[:] = (
    values_at_links[north]
    + values_at_links[east]
    + values_at_links[south]
    + values_at_links[west]
) / number_of_links
return out
def map_mean_of_horizontal_links_to_node(grid, var_name, out=None):
"""Map the mean of links in the x direction touching a node to the node.
map_mean_of_horizontal_links_to_node takes an array *at the links* and
finds the average of all horizontal (x-direction) link neighbor values
for each node in the grid.
It returns an array at the nodes of the mean of these values. If a link
is absent, it is ignored.
Note that here a positive returned value means flux to the east, and
a negative to the west.
Parameters
----------
grid : ModelGrid
A landlab ModelGrid.
var_name : array or field name
Values defined at links.
out : ndarray, optional
Buffer to place mapped values into or `None` to create a new array.
Returns
-------
ndarray
Mapped values at nodes.
Examples
--------
>>> import numpy as np
>>> from landlab.grid.raster_mappers import map_mean_of_horizontal_links_to_node
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> _ = rmg.add_field("z", np.arange(17.), at="link")
>>> map_mean_of_horizontal_links_to_node(rmg, 'z')
array([ 0. , 0.5, 1.5, 2. , 7. , 7.5, 8.5, 9. , 14. ,
14.5, 15.5, 16. ])
LLCATS: NINF LINF MAP
"""
if out is None:
out = grid.empty(centering="node")
if type(var_name) is str:
values_at_links = grid.at_link[var_name]
else:
values_at_links = var_name
hoz_links = grid.links_at_node[:, [0, 2]]
hoz_link_dirs = np.fabs(grid.link_dirs_at_node[:, [0, 2]])
# ^retain "true" directions of links
valid_links = values_at_links[hoz_links] * hoz_link_dirs # invalids = 0
num_valid_links = hoz_link_dirs.sum(axis=1)
np.divide(valid_links.sum(axis=1), num_valid_links, out=out)
return out
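# Here absent links are handled differently from the inlink/outlink mappers:
# link_dirs_at_node stores +1/-1 for real links and 0 for missing ones, so
# np.fabs() yields a 0/1 mask that both zeroes out invalid link values and,
# summed along axis 1, gives the per-node divisor for the mean.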
def map_mean_of_horizontal_active_links_to_node(grid, var_name, out=None):
"""Map the mean of active links in the x direction touching node to the
node.
map_mean_of_horizontal_active_links_to_node takes an array *at the links*
and finds the average of all horizontal (x-direction) link neighbor values
for each node in the grid.
It returns an array at the nodes of the mean of these values. If a link
is absent, it is ignored. If a node has no active links, it receives 0.
Note that here a positive returned value means flux to the east, and
a negative to the west.
Parameters
----------
grid : ModelGrid
A landlab ModelGrid.
var_name : array or field name
Values defined at links.
out : ndarray, optional
Buffer to place mapped values into or `None` to create a new array.
Returns
-------
ndarray
Mapped values at nodes.
Examples
--------
>>> import numpy as np
>>> from landlab.grid.raster_mappers import map_mean_of_horizontal_active_links_to_node
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> _ = rmg.add_field("z", -np.arange(17, dtype=float), at="link")
>>> rmg.status_at_node[rmg.nodes_at_left_edge] = rmg.BC_NODE_IS_CLOSED
>>> map_mean_of_horizontal_active_links_to_node(rmg, 'z')
array([ 0. , 0. , 0. , 0. , 0. , -8. , -8.5, -9. , 0. , 0. , 0. ,
0. ])
LLCATS: NINF LINF MAP
"""
if out is None:
out = grid.zeros(centering="node", dtype=float)
else:
out.fill(0.0)
if type(var_name) is str:
values_at_links = grid.at_link[var_name]
else:
values_at_links = var_name
hoz_links = grid.links_at_node[:, [0, 2]]
hoz_link_dirs = np.fabs(grid.active_link_dirs_at_node[:, [0, 2]])
# ^retain "true" directions of links; no inactives now
valid_links = values_at_links[hoz_links] * hoz_link_dirs # invalids = 0
num_valid_links = hoz_link_dirs.sum(axis=1)
good_nodes = num_valid_links != 0
out[good_nodes] = valid_links.sum(axis=1)[good_nodes] / num_valid_links[good_nodes]
return out
def map_mean_of_vertical_links_to_node(grid, var_name, out=None):
"""Map the mean of links in the y direction touching a node to the node.
map_mean_of_vertical_links_to_node takes an array *at the links* and
finds the average of all vertical (y-direction) link neighbor values
for each node in the grid.
It returns an array at the nodes of the mean of these values. If a link
is absent, it is ignored.
Note that here a positive returned value means flux to the north, and
a negative to the south.
Parameters
----------
grid : ModelGrid
A landlab ModelGrid.
var_name : array or field name
Values defined at links.
out : ndarray, optional
Buffer to place mapped values into or `None` to create a new array.
Returns
-------
ndarray
Mapped values at nodes.
Examples
--------
>>> import numpy as np
>>> from landlab.grid.raster_mappers import map_mean_of_vertical_links_to_node
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> _ = rmg.add_field("z", np.arange(17.), at="link")
>>> map_mean_of_vertical_links_to_node(rmg, 'z')
array([ 3. , 4. , 5. , 6. , 6.5, 7.5, 8.5, 9.5, 10. ,
11. , 12. , 13. ])
LLCATS: NINF LINF MAP
"""
if out is None:
out = grid.empty(centering="node")
if isinstance(var_name, str):
values_at_links = grid.at_link[var_name]
else:
values_at_links = var_name
vert_links = grid.links_at_node[:, [1, 3]]
vert_link_dirs = np.fabs(grid.link_dirs_at_node[:, [1, 3]])
# ^retain "true" directions of links
valid_links = values_at_links[vert_links] * vert_link_dirs # invalids = 0
num_valid_links = vert_link_dirs.sum(axis=1)
np.divide(valid_links.sum(axis=1), num_valid_links, out=out)
return out
def map_mean_of_vertical_active_links_to_node(grid, var_name, out=None):
"""Map the mean of active links in the y direction touching node to the
node.
map_mean_of_vertical_active_links_to_node takes an array *at the links*
and finds the average of all vertical (y-direction) link neighbor values
for each node in the grid.
It returns an array at the nodes of the mean of these values. If a link
is absent, it is ignored. If a node has no active links, it receives 0.
Note that here a positive returned value means flux to the north, and
a negative to the south.
Parameters
----------
grid : ModelGrid
A landlab ModelGrid.
var_name : array or field name
Values defined at links.
out : ndarray, optional
Buffer to place mapped values into or `None` to create a new array.
Returns
-------
ndarray
Mapped values at nodes.
Examples
--------
>>> import numpy as np
>>> from landlab.grid.raster_mappers import map_mean_of_vertical_active_links_to_node
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> _ = rmg.add_field("z", -np.arange(17, dtype=float), at="link")
>>> rmg.status_at_node[rmg.nodes_at_bottom_edge] = rmg.BC_NODE_IS_CLOSED
>>> map_mean_of_vertical_active_links_to_node(rmg, 'z')
array([ 0., 0., 0., 0., 0., -11., -12., 0., 0., -11., -12.,
0.])
LLCATS: NINF LINF MAP
"""
if out is None:
out = grid.zeros(centering="node", dtype=float)
else:
out.fill(0.0)
if isinstance(var_name, str):
values_at_links = grid.at_link[var_name]
else:
values_at_links = var_name
vert_links = grid.links_at_node[:, [1, 3]]
vert_link_dirs = np.fabs(grid.active_link_dirs_at_node[:, [1, 3]])
# ^retain "true" directions of links; no inactives now
valid_links = values_at_links[vert_links] * vert_link_dirs # invalids = 0
num_valid_links = vert_link_dirs.sum(axis=1)
good_nodes = num_valid_links != 0
out[good_nodes] = valid_links.sum(axis=1)[good_nodes] / num_valid_links[good_nodes]
return out
def map_link_vector_components_to_node_raster(grid, data_at_link):
"""Map (x,y) vector components of data_at_link onto nodes.
Examples
--------
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> link_data = np.arange(rmg.number_of_links)
>>> x, y = map_link_vector_components_to_node_raster(rmg, link_data)
>>> x[5:7]
array([ 7.5, 8.5])
>>> y[5:7]
array([ 7.5, 8.5])
"""
x = grid.map_mean_of_horizontal_links_to_node(data_at_link)
y = grid.map_mean_of_vertical_links_to_node(data_at_link)
return x, y
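As a quick usage sketch of the component mapper above (the grid shape and link values here are illustrative choices of mine, not from the source):
import numpy as np
from landlab import RasterModelGrid
from landlab.grid.raster_mappers import map_link_vector_components_to_node_raster

grid = RasterModelGrid((3, 4))
flux_at_link = np.arange(grid.number_of_links, dtype=float)

# x is the mean of the two horizontal links at each node, y the mean of the
# two vertical links, so each component is already node-centred.
x, y = map_link_vector_components_to_node_raster(grid, flux_at_link)
magnitude = np.hypot(x, y)  # per-node vector magnitude, no extra bookkeeping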
| 31.187919
| 91
| 0.637329
| 4,143
| 27,882
| 4.062756
| 0.048274
| 0.042776
| 0.052519
| 0.04135
| 0.953838
| 0.940173
| 0.931737
| 0.910409
| 0.89009
| 0.84678
| 0
| 0.024657
| 0.249444
| 27,882
| 893
| 92
| 31.222844
| 0.779663
| 0.5949
| 0
| 0.731481
| 0
| 0
| 0.005564
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078704
| false
| 0
| 0.018519
| 0
| 0.175926
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ab4c150ad9eeb39be995408e128fe21b6d484495
| 37,187
|
py
|
Python
|
core/controllers/email_dashboard_test.py
|
prayutsu/oppia
|
e82da7653f7bbfb9ded0e1ba16cd9f481ff5a786
|
[
"Apache-2.0"
] | 2
|
2020-03-28T18:32:45.000Z
|
2021-02-07T18:29:31.000Z
|
core/controllers/email_dashboard_test.py
|
prayutsu/oppia
|
e82da7653f7bbfb9ded0e1ba16cd9f481ff5a786
|
[
"Apache-2.0"
] | 35
|
2019-02-23T20:31:21.000Z
|
2019-08-19T12:32:13.000Z
|
core/controllers/email_dashboard_test.py
|
prayutsu/oppia
|
e82da7653f7bbfb9ded0e1ba16cd9f481ff5a786
|
[
"Apache-2.0"
] | 1
|
2021-01-28T05:20:56.000Z
|
2021-01-28T05:20:56.000Z
|
# Copyright 2016 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for email dashboard handler."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.domain import taskqueue_services
from core.domain import user_query_jobs_one_off
from core.domain import user_query_services
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
import feconf
(user_models, email_models) = models.Registry.import_models(
[models.NAMES.user, models.NAMES.email])
class EmailDashboardDataHandlerTests(test_utils.GenericTestBase):
SUBMITTER_EMAIL = 'submit@example.com'
SUBMITTER_USERNAME = 'submit'
USER_A_EMAIL = 'a@example.com'
USER_A_USERNAME = 'a'
def setUp(self):
super(EmailDashboardDataHandlerTests, self).setUp()
self.signup(self.SUBMITTER_EMAIL, self.SUBMITTER_USERNAME)
self.submitter_id = self.get_user_id_from_email(
self.SUBMITTER_EMAIL)
self.signup(self.USER_A_EMAIL, self.USER_A_USERNAME)
self.user_a_id = self.get_user_id_from_email(
self.USER_A_EMAIL)
self.set_admins([self.SUBMITTER_USERNAME])
def test_that_handler_works_correctly(self):
self.login(self.SUBMITTER_EMAIL)
csrf_token = self.get_new_csrf_token()
self.post_json(
'/emaildashboarddatahandler', {
'data': {
'has_not_logged_in_for_n_days': 2,
'inactive_in_last_n_days': 5,
'created_at_least_n_exps': 1,
'created_fewer_than_n_exps': None,
'edited_at_least_n_exps': None,
'edited_fewer_than_n_exps': 2
}}, csrf_token=csrf_token)
self.logout()
query_models = user_models.UserQueryModel.query().fetch()
# Check that model is stored.
self.assertEqual(len(query_models), 1)
query_model = query_models[0]
# Check that correct information is stored in model.
self.assertEqual(query_model.has_not_logged_in_for_n_days, 2)
self.assertEqual(query_model.inactive_in_last_n_days, 5)
self.assertEqual(query_model.created_at_least_n_exps, 1)
self.assertEqual(query_model.edited_fewer_than_n_exps, 2)
self.assertIsNone(query_model.edited_at_least_n_exps)
self.assertIsNone(query_model.created_fewer_than_n_exps)
self.assertEqual(query_model.submitter_id, self.submitter_id)
# Check that MR job has been enqueued.
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
with self.swap(feconf, 'CAN_SEND_EMAILS', True):
self.process_and_flush_pending_mapreduce_tasks()
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS),
0)
def test_query_status_check_handler_with_invalid_query_id_raises_400(
self):
self.login(self.SUBMITTER_EMAIL)
response = self.get_json(
'/querystatuscheck', params={'query_id': 'invalid_query_id'},
expected_status_int=400)
self.assertEqual(response['error'], 'Invalid query id.')
self.logout()
def test_query_status_check_handler(self):
self.login(self.SUBMITTER_EMAIL)
query_id = user_query_services.save_new_query_model(
self.submitter_id, inactive_in_last_n_days=10,
created_at_least_n_exps=5,
has_not_logged_in_for_n_days=30)
query_data = self.get_json(
'/querystatuscheck', params={'query_id': query_id})['query']
self.assertEqual(query_data['id'], query_id)
self.assertEqual(
query_data['status'], feconf.USER_QUERY_STATUS_PROCESSING)
self.assertEqual(
query_data['submitter_username'], self.SUBMITTER_USERNAME)
self.logout()
def test_that_page_is_accessible_to_authorised_users_only(self):
# Make sure that only authorised users can access query pages.
self.login(self.USER_A_EMAIL)
with self.assertRaisesRegexp(Exception, '401 Unauthorized'):
self.get_html_response('/emaildashboard')
with self.assertRaisesRegexp(Exception, '401 Unauthorized'):
self.get_html_response('/querystatuscheck')
self.logout()
def test_that_exception_is_raised_for_invalid_input(self):
self.login(self.SUBMITTER_EMAIL)
csrf_token = self.get_new_csrf_token()
self.post_json(
'/emaildashboarddatahandler', {
'data': {
'has_not_logged_in_for_n_days': 2,
'inactive_in_last_n_days': 5,
'created_at_least_n_exps': 1,
'created_fewer_than_n_exps': 'None',
'edited_at_least_n_exps': None,
'fake_key': 2
}}, csrf_token=csrf_token, expected_status_int=400)
self.post_json(
'/emaildashboarddatahandler', {
'data': {
'has_not_logged_in_for_n_days': 2,
'inactive_in_last_n_days': 5,
'created_at_least_n_exps': 'invalid_value',
'created_fewer_than_n_exps': 'None',
'edited_at_least_n_exps': None
}}, csrf_token=csrf_token, expected_status_int=400)
self.logout()
def test_email_dashboard_page(self):
self.login(self.SUBMITTER_EMAIL)
response = self.get_html_response('/emaildashboard')
self.assertIn('{"title": "Email Dashboard - Oppia"})', response.body)
self.logout()
class EmailDashboardResultTests(test_utils.EmailTestBase):
"""Tests for email dashboard result handler."""
USER_A_EMAIL = 'a@example.com'
USER_A_USERNAME = 'a'
USER_B_EMAIL = 'b@example.com'
USER_B_USERNAME = 'b'
SUBMITTER_EMAIL = 'submi@example.com'
SUBMITTER_USERNAME = 'submit'
NEW_SUBMITTER_EMAIL = 'new_submi@example.com'
NEW_SUBMITTER_USERNAME = 'submit2'
EXP_ID_1 = 'exp_1'
EXP_ID_2 = 'exp_2'
def setUp(self):
super(EmailDashboardResultTests, self).setUp()
# User A has one created exploration.
self.signup(self.USER_A_EMAIL, self.USER_A_USERNAME)
self.user_a_id = self.get_user_id_from_email(
self.USER_A_EMAIL)
user_services.update_email_preferences(
self.user_a_id, True, True, True, True)
self.save_new_valid_exploration(
self.EXP_ID_1, self.user_a_id, end_state_name='End')
# User B has one created exploration.
self.signup(self.USER_B_EMAIL, self.USER_B_USERNAME)
self.user_b_id = self.get_user_id_from_email(
self.USER_B_EMAIL)
user_services.update_email_preferences(
self.user_b_id, True, True, True, True)
self.save_new_valid_exploration(
self.EXP_ID_2, self.user_b_id, end_state_name='End')
# Submitter and new_submitter are the submitters of the queries.
self.signup(self.SUBMITTER_EMAIL, self.SUBMITTER_USERNAME)
self.submitter_id = self.get_user_id_from_email(
self.SUBMITTER_EMAIL)
self.signup(self.NEW_SUBMITTER_EMAIL, self.NEW_SUBMITTER_USERNAME)
self.new_submitter_id = self.get_user_id_from_email(
self.NEW_SUBMITTER_EMAIL)
self.set_admins(
[self.SUBMITTER_USERNAME, self.NEW_SUBMITTER_USERNAME])
def test_email_dashboard_result_page(self):
self.login(self.SUBMITTER_EMAIL)
query_id = user_models.UserQueryModel.get_new_id('')
user_models.UserQueryModel(
id=query_id, inactive_in_last_n_days=10,
has_not_logged_in_for_n_days=30,
created_at_least_n_exps=5,
created_fewer_than_n_exps=None,
edited_at_least_n_exps=None,
edited_fewer_than_n_exps=None,
submitter_id=self.submitter_id,
query_status=feconf.USER_QUERY_STATUS_COMPLETED,
user_ids=[]).put()
response = self.get_html_response('/emaildashboardresult/%s' % query_id)
self.assertIn(
'{"title": "Email Dashboard Result - Oppia"})', response.body)
self.logout()
def test_handler_with_invalid_num_queries_to_fetch_raises_error_400(self):
self.login(self.SUBMITTER_EMAIL)
response = self.get_json(
'/emaildashboarddatahandler',
params={'num_queries_to_fetch': '-5'},
expected_status_int=400)
self.assertEqual(
response['error'], '400 Invalid input for query results.')
response = self.get_json(
'/emaildashboarddatahandler',
params={'num_queries_to_fetch': 'invalid_data'},
expected_status_int=400)
self.assertEqual(
response['error'], '400 Invalid input for query results.')
self.logout()
def test_email_dashboard_data_handler(self):
self.login(self.SUBMITTER_EMAIL)
response = self.get_json(
'/emaildashboarddatahandler',
params={'num_queries_to_fetch': 1})
self.assertEqual(response['recent_queries'], [])
query_id = user_query_services.save_new_query_model(
self.submitter_id, inactive_in_last_n_days=10,
created_at_least_n_exps=5,
has_not_logged_in_for_n_days=30)
response = self.get_json(
'/emaildashboarddatahandler',
params={'num_queries_to_fetch': 1})
self.assertEqual(len(response['recent_queries']), 1)
recent_query = response['recent_queries'][0]
self.assertEqual(recent_query['id'], query_id)
self.assertEqual(
recent_query['status'], feconf.USER_QUERY_STATUS_PROCESSING)
self.logout()
def test_email_dashboard_result_page_with_invalid_query_id_raises_400(self):
self.login(self.SUBMITTER_EMAIL)
query_id = user_query_services.save_new_query_model(
self.submitter_id, inactive_in_last_n_days=10,
created_at_least_n_exps=5,
has_not_logged_in_for_n_days=30)
job_id = user_query_jobs_one_off.UserQueryOneOffJob.create_new()
user_query_jobs_one_off.UserQueryOneOffJob.enqueue(
job_id, additional_job_params={'query_id': query_id})
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS),
1)
# Complete execution of query.
with self.swap(feconf, 'CAN_SEND_EMAILS', True):
self.process_and_flush_pending_mapreduce_tasks()
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS),
0)
csrf_token = self.get_new_csrf_token()
response = self.post_json(
'/emaildashboardresult/%s' % 'invalid_query_id', {},
csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(response['error'], '400 Invalid query id.')
self.logout()
def test_email_dashboard_result_page_with_mismatch_of_query_id_raises_401(
self):
self.login(self.SUBMITTER_EMAIL)
query_id = user_query_services.save_new_query_model(
self.submitter_id, inactive_in_last_n_days=10,
created_at_least_n_exps=5,
has_not_logged_in_for_n_days=30)
query_id_1 = user_query_services.save_new_query_model(
self.new_submitter_id, inactive_in_last_n_days=10,
created_at_least_n_exps=5,
has_not_logged_in_for_n_days=30)
job_id = user_query_jobs_one_off.UserQueryOneOffJob.create_new()
user_query_jobs_one_off.UserQueryOneOffJob.enqueue(
job_id, additional_job_params={'query_id': query_id})
job_id_1 = user_query_jobs_one_off.UserQueryOneOffJob.create_new()
user_query_jobs_one_off.UserQueryOneOffJob.enqueue(
job_id_1, additional_job_params={'query_id': query_id_1})
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS),
2)
# Complete execution of query.
with self.swap(feconf, 'CAN_SEND_EMAILS', True):
self.process_and_flush_pending_mapreduce_tasks()
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS),
0)
csrf_token = self.get_new_csrf_token()
# Raises an authorization error when passed a query id whose associated
# query model was not created by the logged-in user.
response = self.post_json(
'/emaildashboardresult/%s' % query_id_1, {},
csrf_token=csrf_token, expected_status_int=401)
self.assertEqual(
response['error'],
'%s is not an authorized user for this query.'
% (self.submitter_id))
self.logout()
def test_cancel_email_handler_with_invalid_query_id_raises_400(self):
self.login(self.SUBMITTER_EMAIL)
query_id = user_query_services.save_new_query_model(
self.submitter_id, inactive_in_last_n_days=10,
created_at_least_n_exps=5,
has_not_logged_in_for_n_days=30)
job_id = user_query_jobs_one_off.UserQueryOneOffJob.create_new()
user_query_jobs_one_off.UserQueryOneOffJob.enqueue(
job_id, additional_job_params={'query_id': query_id})
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS),
1)
# Complete execution of query.
with self.swap(feconf, 'CAN_SEND_EMAILS', True):
self.process_and_flush_pending_mapreduce_tasks()
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS),
0)
csrf_token = self.get_new_csrf_token()
response = self.post_json(
'/emaildashboardcancelresult/%s' % 'invalid_query_id', {},
csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(response['error'], '400 Invalid query id.')
self.logout()
def test_cancel_email_handler_with_mismatch_of_query_id_raises_401(self):
self.login(self.SUBMITTER_EMAIL)
query_id = user_query_services.save_new_query_model(
self.submitter_id, inactive_in_last_n_days=10,
created_at_least_n_exps=5,
has_not_logged_in_for_n_days=30)
query_id_1 = user_query_services.save_new_query_model(
self.new_submitter_id, inactive_in_last_n_days=10,
created_at_least_n_exps=5,
has_not_logged_in_for_n_days=30)
job_id = user_query_jobs_one_off.UserQueryOneOffJob.create_new()
user_query_jobs_one_off.UserQueryOneOffJob.enqueue(
job_id, additional_job_params={'query_id': query_id})
job_id_1 = user_query_jobs_one_off.UserQueryOneOffJob.create_new()
user_query_jobs_one_off.UserQueryOneOffJob.enqueue(
job_id_1, additional_job_params={'query_id': query_id_1})
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS),
2)
# Complete execution of query.
with self.swap(feconf, 'CAN_SEND_EMAILS', True):
self.process_and_flush_pending_mapreduce_tasks()
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS),
0)
csrf_token = self.get_new_csrf_token()
# Raises an authorization error when passed a query id whose associated
# query model was not created by the logged-in user.
response = self.post_json(
'/emaildashboardcancelresult/%s' % query_id_1, {},
csrf_token=csrf_token, expected_status_int=401)
self.assertEqual(
response['error'],
'%s is not an authorized user for this query.'
% (self.submitter_id))
self.logout()
def test_bulk_email_handler_with_invalid_query_id_raises_400(self):
self.login(self.SUBMITTER_EMAIL)
query_id = user_query_services.save_new_query_model(
self.submitter_id, inactive_in_last_n_days=10,
created_at_least_n_exps=5,
has_not_logged_in_for_n_days=30)
job_id = user_query_jobs_one_off.UserQueryOneOffJob.create_new()
user_query_jobs_one_off.UserQueryOneOffJob.enqueue(
job_id, additional_job_params={'query_id': query_id})
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS),
1)
# Complete execution of query.
with self.swap(feconf, 'CAN_SEND_EMAILS', True):
self.process_and_flush_pending_mapreduce_tasks()
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS),
0)
csrf_token = self.get_new_csrf_token()
response = self.post_json(
'/emaildashboardtestbulkemailhandler/%s' % 'invalid_query_id', {},
csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(response['error'], '400 Invalid query id.')
self.logout()
def test_bulk_email_handler_with_mismatch_of_query_id_raises_401(self):
self.login(self.SUBMITTER_EMAIL)
query_id = user_query_services.save_new_query_model(
self.submitter_id, inactive_in_last_n_days=10,
created_at_least_n_exps=5,
has_not_logged_in_for_n_days=30)
query_id_1 = user_query_services.save_new_query_model(
self.new_submitter_id, inactive_in_last_n_days=10,
created_at_least_n_exps=5,
has_not_logged_in_for_n_days=30)
job_id = user_query_jobs_one_off.UserQueryOneOffJob.create_new()
user_query_jobs_one_off.UserQueryOneOffJob.enqueue(
job_id, additional_job_params={'query_id': query_id})
job_id_1 = user_query_jobs_one_off.UserQueryOneOffJob.create_new()
user_query_jobs_one_off.UserQueryOneOffJob.enqueue(
job_id_1, additional_job_params={'query_id': query_id_1})
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS),
2)
# Complete execution of query.
with self.swap(feconf, 'CAN_SEND_EMAILS', True):
self.process_and_flush_pending_mapreduce_tasks()
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS),
0)
csrf_token = self.get_new_csrf_token()
# Raises an authorization error when passed a query id whose associated
# query model was not created by the logged-in user.
response = self.post_json(
'/emaildashboardtestbulkemailhandler/%s' % query_id_1, {},
csrf_token=csrf_token, expected_status_int=401)
self.assertEqual(
response['error'],
'%s is not an authorized user for this query.'
% (self.submitter_id))
self.logout()
def test_that_correct_emails_are_sent_to_all_users(self):
self.login(self.SUBMITTER_EMAIL)
csrf_token = self.get_new_csrf_token()
self.post_json(
'/emaildashboarddatahandler', {
'data': {
'has_not_logged_in_for_n_days': None,
'inactive_in_last_n_days': None,
'created_at_least_n_exps': 1,
'created_fewer_than_n_exps': None,
'edited_at_least_n_exps': None,
'edited_fewer_than_n_exps': None
}}, csrf_token=csrf_token)
self.logout()
query_models = user_models.UserQueryModel.query().fetch()
# Check that model is stored.
self.assertEqual(len(query_models), 1)
query_model = query_models[0]
# Check that MR job has been enqueued.
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
with self.swap(feconf, 'CAN_SEND_EMAILS', True):
self.process_and_flush_pending_mapreduce_tasks()
# Check that qualified users are valid.
query_models = user_models.UserQueryModel.query().fetch()
self.assertEqual(len(query_models[0].user_ids), 2)
self.assertEqual(
sorted(query_models[0].user_ids),
sorted([self.user_a_id, self.user_b_id]))
# Check that query completion email is sent to submitter.
messages = self._get_sent_email_messages(
self.SUBMITTER_EMAIL)
self.assertEqual(len(messages), 1)
# Send email from email dashboard result page.
self.login(self.SUBMITTER_EMAIL)
csrf_token = self.get_new_csrf_token()
self.post_json(
'/emaildashboardresult/%s' % query_model.id, {
'data': {
'email_subject': 'subject',
'email_body': 'body',
'max_recipients': None,
'email_intent': 'bulk_email_marketing'
}}, csrf_token=csrf_token)
self.logout()
# Check that emails are sent to qualified users.
messages_a = self._get_sent_email_messages(
self.USER_A_EMAIL)
self.assertEqual(len(messages_a), 1)
self.assertEqual(
messages_a[0].html.decode(), 'body')
self.assertEqual(
messages_a[0].body.decode(), 'body')
messages_b = self._get_sent_email_messages(
self.USER_B_EMAIL)
self.assertEqual(len(messages_b), 1)
self.assertEqual(
messages_b[0].html.decode(), 'body')
self.assertEqual(
messages_b[0].body.decode(), 'body')
# Check that correct email model is stored in backend.
query_models = user_models.UserQueryModel.query().fetch()
sent_email_model = email_models.BulkEmailModel.get(
query_models[0].sent_email_model_id)
self.assertEqual(
sent_email_model.subject, 'subject')
self.assertEqual(
sent_email_model.html_body, 'body')
self.assertEqual(
sorted(sent_email_model.recipient_ids),
sorted([self.user_a_id, self.user_b_id]))
self.assertEqual(
sent_email_model.sender_id, self.submitter_id)
self.assertEqual(
sent_email_model.sender_email,
'%s <%s>' % (self.SUBMITTER_USERNAME, self.SUBMITTER_EMAIL))
self.assertEqual(
sent_email_model.intent,
feconf.BULK_EMAIL_INTENT_MARKETING)
# Check that the BulkEmailModel id is stored in the UserBulkEmailsModel
# of the recipients.
recipient_a = user_models.UserBulkEmailsModel.get(self.user_a_id)
self.assertEqual(
recipient_a.sent_email_model_ids,
[query_models[0].sent_email_model_id])
recipient_b = user_models.UserBulkEmailsModel.get(self.user_b_id)
self.assertEqual(
recipient_b.sent_email_model_ids,
[query_models[0].sent_email_model_id])
def test_that_valid_exceptions_are_raised(self):
# Check that exception is raised for incorrect query id.
self.login(self.SUBMITTER_EMAIL)
with self.assertRaisesRegexp(Exception, '400 Bad Request'):
self.get_html_response('/emaildashboardresult/%s' % 'q123')
csrf_token = self.get_new_csrf_token()
self.post_json(
'/emaildashboarddatahandler', {
'data': {
'has_not_logged_in_for_n_days': None,
'inactive_in_last_n_days': None,
'created_at_least_n_exps': 1,
'created_fewer_than_n_exps': None,
'edited_at_least_n_exps': None,
'edited_fewer_than_n_exps': None
}}, csrf_token=csrf_token)
query_models = user_models.UserQueryModel.query().fetch()
# Check that exception is raised if query is still processing.
self.assertEqual(
query_models[0].query_status, feconf.USER_QUERY_STATUS_PROCESSING)
with self.assertRaisesRegexp(Exception, '400 Bad Request'):
self.get_html_response(
'/emaildashboardresult/%s' % query_models[0].id)
self.logout()
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
# Complete execution of query.
with self.swap(feconf, 'CAN_SEND_EMAILS', True):
self.process_and_flush_pending_mapreduce_tasks()
query_models = user_models.UserQueryModel.query().fetch()
self.assertEqual(
query_models[0].query_status,
feconf.USER_QUERY_STATUS_COMPLETED)
# Check that exception is raised for unauthorized user.
self.login(self.USER_A_EMAIL)
with self.assertRaisesRegexp(Exception, '401 Unauthorized'):
self.get_html_response(
'/emaildashboardresult/%s' % query_models[0].id)
self.logout()
# Check that exception is raised if current user is not submitter of
# that query.
self.login(self.NEW_SUBMITTER_EMAIL)
with self.assertRaisesRegexp(Exception, '401 Unauthorized'):
self.get_html_response(
'/emaildashboardresult/%s' % query_models[0].id)
self.logout()
# Check that exception is raised for accessing query result after
# query result has been used.
with self.swap(feconf, 'CAN_SEND_EMAILS', True):
# Send email from email dashboard result page.
self.login(self.SUBMITTER_EMAIL)
csrf_token = self.get_new_csrf_token()
self.post_json(
'/emaildashboardresult/%s' % query_models[0].id, {
'data': {
'email_subject': 'subject',
'email_body': 'body',
'max_recipients': 1,
'email_intent': 'bulk_email_marketing'
}}, csrf_token=csrf_token)
self.logout()
query_models = user_models.UserQueryModel.query().fetch()
self.assertEqual(
query_models[0].query_status, feconf.USER_QUERY_STATUS_ARCHIVED)
self.assertTrue(query_models[0].deleted)
self.login(self.SUBMITTER_EMAIL)
with self.assertRaisesRegexp(Exception, '400 Bad Request'):
self.get_html_response(
'/emaildashboardresult/%s' % query_models[0].id)
self.logout()
def test_that_correct_emails_are_sent_to_max_n_recipients(self):
self.login(self.SUBMITTER_EMAIL)
csrf_token = self.get_new_csrf_token()
self.post_json(
'/emaildashboarddatahandler', {
'data': {
'has_not_logged_in_for_n_days': None,
'inactive_in_last_n_days': None,
'created_at_least_n_exps': 1,
'created_fewer_than_n_exps': None,
'edited_at_least_n_exps': None,
'edited_fewer_than_n_exps': None
}}, csrf_token=csrf_token)
self.logout()
query_models = user_models.UserQueryModel.query().fetch()
with self.swap(feconf, 'CAN_SEND_EMAILS', True):
self.process_and_flush_pending_mapreduce_tasks()
# Check that qualified users are valid.
query_models = user_models.UserQueryModel.query().fetch()
self.assertEqual(len(query_models[0].user_ids), 2)
# Send email from email dashboard result page.
self.login(self.SUBMITTER_EMAIL)
csrf_token = self.get_new_csrf_token()
self.post_json(
'/emaildashboardresult/%s' % query_models[0].id, {
'data': {
'email_subject': 'subject',
'email_body': 'body',
'max_recipients': 1,
'email_intent': 'bulk_email_marketing'
}}, csrf_token=csrf_token)
self.logout()
# Check that emails are sent to max n qualified users.
# One email is sent to the submitter on query completion; the second
# is sent to one of the 2 qualified users.
messages = self._get_sent_email_messages(
self.SUBMITTER_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].to, [self.SUBMITTER_EMAIL])
messages_a = self._get_sent_email_messages(
self.USER_A_EMAIL)
messages_b = self._get_sent_email_messages(
self.USER_B_EMAIL)
self.assertEqual(sorted([len(messages_a), len(messages_b)]), [0, 1])
def test_that_no_emails_are_sent_if_query_is_canceled(self):
self.login(self.SUBMITTER_EMAIL)
csrf_token = self.get_new_csrf_token()
self.post_json(
'/emaildashboarddatahandler', {
'data': {
'has_not_logged_in_for_n_days': None,
'inactive_in_last_n_days': None,
'created_at_least_n_exps': 1,
'created_fewer_than_n_exps': None,
'edited_at_least_n_exps': None,
'edited_fewer_than_n_exps': None
}}, csrf_token=csrf_token)
self.logout()
query_models = user_models.UserQueryModel.query().fetch()
with self.swap(feconf, 'CAN_SEND_EMAILS', True):
self.process_and_flush_pending_mapreduce_tasks()
# Check that qualified users are valid.
query_models = user_models.UserQueryModel.query().fetch()
self.assertEqual(len(query_models[0].user_ids), 2)
# Send email from email dashboard result page.
self.login(self.SUBMITTER_EMAIL)
csrf_token = self.get_new_csrf_token()
self.post_json(
'/emaildashboardcancelresult/%s' % query_models[0].id, {},
csrf_token=csrf_token)
self.logout()
query_models = user_models.UserQueryModel.query().fetch()
self.assertEqual(
query_models[0].query_status, feconf.USER_QUERY_STATUS_ARCHIVED)
self.assertTrue(query_models[0].deleted)
# Check that no email is sent to qualified users.
messages_a = self._get_sent_email_messages(
self.USER_A_EMAIL)
self.assertEqual(len(messages_a), 0)
messages_b = self._get_sent_email_messages(
self.USER_B_EMAIL)
self.assertEqual(len(messages_b), 0)
def test_that_test_email_for_bulk_emails_is_sent(self):
self.login(self.SUBMITTER_EMAIL)
csrf_token = self.get_new_csrf_token()
self.post_json(
'/emaildashboarddatahandler', {
'data': {
'has_not_logged_in_for_n_days': None,
'inactive_in_last_n_days': None,
'created_at_least_n_exps': 1,
'created_fewer_than_n_exps': None,
'edited_at_least_n_exps': None,
'edited_fewer_than_n_exps': None
}}, csrf_token=csrf_token)
self.logout()
query_models = user_models.UserQueryModel.query().fetch()
with self.swap(feconf, 'CAN_SEND_EMAILS', True):
self.process_and_flush_pending_mapreduce_tasks()
email_subject = 'email_subject'
email_body = 'email_body'
# Check that correct test email is sent.
self.login(self.SUBMITTER_EMAIL)
csrf_token = self.get_new_csrf_token()
self.post_json(
'/emaildashboardtestbulkemailhandler/%s' % query_models[0].id, {
'email_body': email_body,
'email_subject': email_subject
}, csrf_token=csrf_token)
self.logout()
# Check that the correct test email is sent to the submitter of the query.
# One email is sent when the query completes; the other is the test email.
test_email_html_body = (
'[This is a test email.]<br><br> %s' % email_body)
test_email_text_body = '[This is a test email.]\n\n %s' % email_body
messages = self._get_sent_email_messages(
self.SUBMITTER_EMAIL)
self.assertEqual(len(messages), 2)
self.assertEqual(
messages[1].html.decode(), test_email_html_body)
self.assertEqual(
messages[1].body.decode(), test_email_text_body)
all_model = email_models.SentEmailModel.query().fetch()
self.assertEqual(len(all_model), 2)
sent_email_model = all_model[0]
self.assertEqual(
sent_email_model.subject, email_subject)
self.assertEqual(
sent_email_model.html_body, test_email_html_body)
self.assertEqual(
sent_email_model.recipient_id, query_models[0].submitter_id)
self.assertEqual(
sent_email_model.sender_id, query_models[0].submitter_id)
self.assertEqual(
sent_email_model.intent, feconf.BULK_EMAIL_INTENT_TEST)
def test_that_test_email_is_not_sent_to_query_recipients(self):
self.login(self.SUBMITTER_EMAIL)
csrf_token = self.get_new_csrf_token()
self.post_json(
'/emaildashboarddatahandler', {
'data': {
'has_not_logged_in_for_n_days': None,
'inactive_in_last_n_days': None,
'created_at_least_n_exps': 1,
'created_fewer_than_n_exps': None,
'edited_at_least_n_exps': None,
'edited_fewer_than_n_exps': None
}}, csrf_token=csrf_token)
self.logout()
query_models = user_models.UserQueryModel.query().fetch()
with self.swap(feconf, 'CAN_SEND_EMAILS', True):
self.process_and_flush_pending_mapreduce_tasks()
self.login(self.SUBMITTER_EMAIL)
csrf_token = self.get_new_csrf_token()
self.post_json(
'/emaildashboardtestbulkemailhandler/%s' % query_models[0].id, {
'email_body': 'email_body',
'email_subject': 'email_subject'
}, csrf_token=csrf_token)
self.logout()
# Check that the test email is sent to the submitter of the query.
# One email is sent when the query completes; the other is the test email.
messages = self._get_sent_email_messages(
self.SUBMITTER_EMAIL)
self.assertEqual(len(messages), 2)
# Check that no emails are sent to query recipients.
query_models = user_models.UserQueryModel.query().fetch()
query_model = query_models[0]
self.assertEqual(len(query_model.user_ids), 2)
self.assertEqual(
sorted(query_model.user_ids),
sorted([self.user_a_id, self.user_b_id]))
# Check that no emails are sent to user A or user B.
messages_a = self._get_sent_email_messages(
self.USER_A_EMAIL)
self.assertEqual(len(messages_a), 0)
messages_b = self._get_sent_email_messages(
self.USER_B_EMAIL)
self.assertEqual(len(messages_b), 0)
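Most of the tests above rebuild the same six-key payload by hand; a small hypothetical helper (my sketch, not part of the Oppia codebase) would remove that duplication:
def make_query_data(**overrides):
    """Build the payload the tests above post to /emaildashboarddatahandler."""
    data = {
        'has_not_logged_in_for_n_days': None,
        'inactive_in_last_n_days': None,
        'created_at_least_n_exps': 1,
        'created_fewer_than_n_exps': None,
        'edited_at_least_n_exps': None,
        'edited_fewer_than_n_exps': None,
    }
    data.update(overrides)
    return {'data': data}
A test would then call self.post_json('/emaildashboarddatahandler', make_query_data(), csrf_token=csrf_token), overriding only the keys it cares about.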
| 41.503348
| 80
| 0.631377
| 4,447
| 37,187
| 4.871149
| 0.063189
| 0.034069
| 0.027606
| 0.018281
| 0.859524
| 0.821115
| 0.776521
| 0.742314
| 0.720986
| 0.703628
| 0
| 0.010683
| 0.285127
| 37,187
| 895
| 81
| 41.549721
| 0.804168
| 0.083658
| 0
| 0.772134
| 0
| 0
| 0.11418
| 0.062414
| 0
| 0
| 0
| 0
| 0.13643
| 1
| 0.033382
| false
| 0
| 0.014514
| 0
| 0.071118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ab4e47bb1da9e237e625bd2c8e11ec97270eb110
| 2,639
|
py
|
Python
|
Module_06/tests/sauce_lab/test_contact_info.py
|
JoseGtz/2021_python_selenium
|
c7b39479c78839ba2e2e2633a0f673a8b02fb4cb
|
[
"Unlicense"
] | null | null | null |
Module_06/tests/sauce_lab/test_contact_info.py
|
JoseGtz/2021_python_selenium
|
c7b39479c78839ba2e2e2633a0f673a8b02fb4cb
|
[
"Unlicense"
] | null | null | null |
Module_06/tests/sauce_lab/test_contact_info.py
|
JoseGtz/2021_python_selenium
|
c7b39479c78839ba2e2e2633a0f673a8b02fb4cb
|
[
"Unlicense"
] | null | null | null |
import pytest
from Module_06.src.pages.login import LoginPage
from Module_06.tests.common.test_base import TestBase
_DEF_USER = 'standard_user'
_DEF_PASSWORD = 'secret_sauce'
class TestContactInfo(TestBase):
@pytest.mark.sanity
@pytest.mark.regression
@pytest.mark.contact_info
def test_contact_info_incomplete_LastName(self):
login = LoginPage(self.driver)
login.open()
inventory = login.login(_DEF_USER, _DEF_PASSWORD)
for item in inventory.products:
item.add_to_cart()
cart_page = inventory.open_cart()
contact_info_page = cart_page.checkout()
contact_info_page.info(firstname="Jose Luis")
contact_info_page.checkout()
assert contact_info_page.get_error_msg() == "Error: Last Name is required"
@pytest.mark.sanity
@pytest.mark.regression
@pytest.mark.contact_info
def test_contact_info_incomplete_FirstName(self):
login = LoginPage(self.driver)
login.open()
inventory = login.login(_DEF_USER, _DEF_PASSWORD)
for item in inventory.products:
item.add_to_cart()
cart_page = inventory.open_cart()
contact_info_page = cart_page.checkout()
contact_info_page.info(lastname="Gutierrez")
contact_info_page.checkout()
assert contact_info_page.get_error_msg() == "Error: First Name is required"
@pytest.mark.sanity
@pytest.mark.regression
@pytest.mark.contact_info
def test_contact_info_incomplete_PostalCode(self):
login = LoginPage(self.driver)
login.open()
inventory = login.login(_DEF_USER, _DEF_PASSWORD)
for item in inventory.products:
item.add_to_cart()
cart_page = inventory.open_cart()
contact_info_page = cart_page.checkout()
contact_info_page.info(firstname="Jose Luis", lastname="Gutierrez")
contact_info_page.checkout()
assert contact_info_page.get_error_msg() == "Error: Postal Code is required"
@pytest.mark.sanity
@pytest.mark.regression
@pytest.mark.contact_info
def test_navigation_back(self):
login = LoginPage(self.driver)
login.open()
inventory = login.login(_DEF_USER, _DEF_PASSWORD)
for item in inventory.products:
item.add_to_cart()
cart_page = inventory.open_cart()
contact_info_page = cart_page.checkout()
contact_info_page.info(firstname="Jose Luis", lastname="Gutierrez", postal_code="555555")
contact_info_page.back_to_cart()
assert cart_page.get_title() == 'YOUR CART', 'Cart page label should be "YOUR CART"'
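The three missing-field tests above differ only in which field is omitted and in the expected message; as a sketch (assuming the same LoginPage/TestBase page objects and module constants), pytest.mark.parametrize could collapse them into one test:
import pytest
from Module_06.src.pages.login import LoginPage
from Module_06.tests.common.test_base import TestBase

class TestContactInfoParametrized(TestBase):
    @pytest.mark.parametrize('info_kwargs, expected_error', [
        ({'firstname': 'Jose Luis'}, 'Error: Last Name is required'),
        ({'lastname': 'Gutierrez'}, 'Error: First Name is required'),
        ({'firstname': 'Jose Luis', 'lastname': 'Gutierrez'},
         'Error: Postal Code is required'),
    ])
    def test_missing_contact_field(self, info_kwargs, expected_error):
        # Identical setup to the tests above, shared across all three cases.
        login = LoginPage(self.driver)
        login.open()
        inventory = login.login(_DEF_USER, _DEF_PASSWORD)
        for item in inventory.products:
            item.add_to_cart()
        contact_info_page = inventory.open_cart().checkout()
        contact_info_page.info(**info_kwargs)
        contact_info_page.checkout()
        assert contact_info_page.get_error_msg() == expected_error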
| 37.7
| 97
| 0.690034
| 330
| 2,639
| 5.20303
| 0.2
| 0.140944
| 0.131043
| 0.051252
| 0.810134
| 0.810134
| 0.810134
| 0.810134
| 0.810134
| 0.810134
| 0
| 0.004836
| 0.21637
| 2,639
| 69
| 98
| 38.246377
| 0.825435
| 0
| 0
| 0.693548
| 0
| 0
| 0.082607
| 0
| 0
| 0
| 0
| 0
| 0.064516
| 1
| 0.064516
| false
| 0.080645
| 0.048387
| 0
| 0.129032
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
db3e502b35cb182b07fe8a8d62d15c3e259c683e
| 4,479
|
py
|
Python
|
test/unit/test_cryptodexd_data_shims.py
|
CryptoDEX/sentinel
|
88ac417f34c4f42670298abf3f252ab8652b7887
|
[
"MIT"
] | null | null | null |
test/unit/test_cryptodexd_data_shims.py
|
CryptoDEX/sentinel
|
88ac417f34c4f42670298abf3f252ab8652b7887
|
[
"MIT"
] | null | null | null |
test/unit/test_cryptodexd_data_shims.py
|
CryptoDEX/sentinel
|
88ac417f34c4f42670298abf3f252ab8652b7887
|
[
"MIT"
] | null | null | null |
import pytest
import sys
import os
os.environ['SENTINEL_CONFIG'] = os.path.normpath(os.path.join(os.path.dirname(__file__), '../test_sentinel.conf'))
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '../../lib')))
import cryptodexlib
@pytest.fixture
def sentinel_proposal_hex():
return '5b2270726f706f73616c222c207b22656e645f65706f6368223a20313439313032323830302c20226e616d65223a2022626565722d7265696d62757273656d656e742d37222c20227061796d656e745f61646472657373223a2022795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e74223a20372e30303030303030302c202273746172745f65706f6368223a20313438333235303430302c202275726c223a202268747470733a2f2f6461736863656e7472616c2e636f6d2f626565722d7265696d62757273656d656e742d37227d5d'
@pytest.fixture
def sentinel_superblock_hex():
return '5b227375706572626c6f636b222c207b226576656e745f626c6f636b5f686569676874223a2036323530302c20227061796d656e745f616464726573736573223a2022795965384b77796155753559737753596d42337133727978385854557539793755697c795443363268755234595145506e39414a486a6e517878726548536267416f617456222c20227061796d656e745f616d6f756e7473223a2022357c33227d5d'
@pytest.fixture
def cryptodexd_proposal_hex():
return '5b5b2270726f706f73616c222c207b22656e645f65706f6368223a20313439313336383430302c20226e616d65223a2022626565722d7265696d62757273656d656e742d39222c20227061796d656e745f61646472657373223a2022795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e74223a2034392e30303030303030302c202273746172745f65706f6368223a20313438333235303430302c202274797065223a20312c202275726c223a202268747470733a2f2f7777772e6461736863656e7472616c2e6f72672f702f626565722d7265696d62757273656d656e742d39227d5d5d'
@pytest.fixture
def cryptodexd_superblock_hex():
return '5b5b2274726967676572222c207b226576656e745f626c6f636b5f686569676874223a2036323530302c20227061796d656e745f616464726573736573223a2022795965384b77796155753559737753596d42337133727978385854557539793755697c795443363268755234595145506e39414a486a6e517878726548536267416f617456222c20227061796d656e745f616d6f756e7473223a2022357c33222c202274797065223a20327d5d5d'
# ========================================================================
def test_SHIM_deserialise_from_cryptodexd(cryptodexd_proposal_hex, cryptodexd_superblock_hex):
assert cryptodexlib.SHIM_deserialise_from_cryptodexd(cryptodexd_proposal_hex) == '5b2270726f706f73616c222c207b22656e645f65706f6368223a20313439313336383430302c20226e616d65223a2022626565722d7265696d62757273656d656e742d39222c20227061796d656e745f61646472657373223a2022795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e74223a2034392e30303030303030302c202273746172745f65706f6368223a20313438333235303430302c202275726c223a202268747470733a2f2f7777772e6461736863656e7472616c2e6f72672f702f626565722d7265696d62757273656d656e742d39227d5d'
assert cryptodexlib.SHIM_deserialise_from_cryptodexd(cryptodexd_superblock_hex) == '5b227375706572626c6f636b222c207b226576656e745f626c6f636b5f686569676874223a2036323530302c20227061796d656e745f616464726573736573223a2022795965384b77796155753559737753596d42337133727978385854557539793755697c795443363268755234595145506e39414a486a6e517878726548536267416f617456222c20227061796d656e745f616d6f756e7473223a2022357c33227d5d'
def test_SHIM_serialise_for_cryptodexd(sentinel_proposal_hex, sentinel_superblock_hex):
assert cryptodexlib.SHIM_serialise_for_cryptodexd(sentinel_proposal_hex) == '5b5b2270726f706f73616c222c207b22656e645f65706f6368223a20313439313032323830302c20226e616d65223a2022626565722d7265696d62757273656d656e742d37222c20227061796d656e745f61646472657373223a2022795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e74223a20372e30303030303030302c202273746172745f65706f6368223a20313438333235303430302c202274797065223a20312c202275726c223a202268747470733a2f2f6461736863656e7472616c2e636f6d2f626565722d7265696d62757273656d656e742d37227d5d5d'
assert cryptodexlib.SHIM_serialise_for_cryptodexd(sentinel_superblock_hex) == '5b5b2274726967676572222c207b226576656e745f626c6f636b5f686569676874223a2036323530302c20227061796d656e745f616464726573736573223a2022795965384b77796155753559737753596d42337133727978385854557539793755697c795443363268755234595145506e39414a486a6e517878726548536267416f617456222c20227061796d656e745f616d6f756e7473223a2022357c33222c202274797065223a20327d5d5d'
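The fixtures above are hex-encoded JSON, and the sentinel- and cryptodexd-side forms differ only in an extra enclosing list (plus a "type" field); a plain-Python round-trip (my illustration, not the cryptodexlib implementation) shows the encoding at work:
import binascii
import json

# Encode a governance object the way the fixtures above are encoded.
obj = ["superblock", {"event_block_height": 62500, "payment_amounts": "5|3"}]
hex_blob = binascii.hexlify(json.dumps(obj).encode("utf-8")).decode("ascii")

# Decoding recovers the object; the cryptodexd-side fixtures simply carry
# one more enclosing list around the same structure.
assert json.loads(binascii.unhexlify(hex_blob).decode("utf-8")) == obj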
| 114.846154
| 588
| 0.935253
| 135
| 4,479
| 30.62963
| 0.266667
| 0.008706
| 0.015478
| 0.02104
| 0.107134
| 0.100846
| 0.100846
| 0.019831
| 0.019831
| 0.019831
| 0
| 0.678245
| 0.022996
| 4,479
| 38
| 589
| 117.868421
| 0.266682
| 0.016075
| 0
| 0.166667
| 0
| 0
| 0.772985
| 0.767537
| 0
| 1
| 0
| 0
| 0.166667
| 1
| 0.25
| false
| 0
| 0.166667
| 0.166667
| 0.583333
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 10
|
db6c7733483e2beae837651b114a6c40ecad62b1
| 16,133
|
py
|
Python
|
tests/data/quantities/test_maths.py
|
Andy-Wilkinson/ChemMLToolk
|
83efc7ea66d2def860a3e04ccd70d77fb689fddc
|
[
"MIT"
] | 1
|
2019-10-30T03:43:24.000Z
|
2019-10-30T03:43:24.000Z
|
tests/data/quantities/test_maths.py
|
Andy-Wilkinson/ChemMLToolk
|
83efc7ea66d2def860a3e04ccd70d77fb689fddc
|
[
"MIT"
] | 2
|
2021-11-28T21:09:30.000Z
|
2021-11-28T21:09:39.000Z
|
tests/data/quantities/test_maths.py
|
Andy-Wilkinson/ChemMLToolkit
|
83efc7ea66d2def860a3e04ccd70d77fb689fddc
|
[
"MIT"
] | null | null | null |
import pytest
import chemmltoolkit.data.quantities as quant
class TestMaths(object):
@pytest.mark.parametrize("a_str,b_str", [
('10', '10'),
('-10', '10'),
('0.001', '0.001'),
('-0.001', '0.001'),
('>10', '>10'),
('<-10', '>10'),
('<10', '>=0'),
('>-10', '>=0'),
('>=10', '>=10'),
('<=-10', '>=10'),
('<=10', '>=0'),
('>=-10', '>=0'),
('~10', '~10'),
('~-10', '~10'),
('-3--2', '2-3'),
('-2-0', '0-2'),
('-2-1', '0-2'),
('-1-2', '0-2'),
('0-2', '0-2'),
('2-3', '2-3'),
])
def test_absolute(self, a_str: str, b_str: str):
a = quant.from_string(a_str)
b = quant.from_string(b_str)
assert quant.isclose(quant.abs(a), b)
assert quant.isclose(abs(a), b)
@pytest.mark.parametrize("a_str,b_str,expected_result_str", [
('10', '20', '30'),
('10', '-20', '-10'),
('0.001', '0.002', '0.003'),
('>10', '10', '>20'),
('>10', '>10', '>20'),
('<10', '10', '<20'),
('<10', '<10', '<20'),
('>=10', '10', '>=20'),
('>=10', '>=10', '>=20'),
('<=10', '10', '<=20'),
('<=10', '<=10', '<=20'),
('~10', '10', '~20'),
('~10', '~10', '~20'),
('>10', '>=10', '>20'),
('<10', '<=10', '<20'),
('10', '2-3', '12-13'),
('10-20', '2-3', '12-23'),
('>10', '2-3', '>12'),
('<10', '2-3', '<13'),
('>=10', '2-3', '>=12'),
('<=10', '2-3', '<=13'),
])
def test_add(self, a_str: str, b_str: str, expected_result_str: str):
a = quant.from_string(a_str)
b = quant.from_string(b_str)
expected_result = quant.from_string(expected_result_str)
assert quant.isclose(quant.add(a, b), expected_result)
assert quant.isclose(a + b, expected_result)
assert quant.isclose(quant.add(b, a), expected_result)
assert quant.isclose(b + a, expected_result)
@pytest.mark.parametrize("a_str,b_str,expected_result", [
('10', '20', False),
('10', '10', True),
('0.001', '0.001', True),
('10', '10.000000001', True),
('10', '>10', False),
('10', '>10.000000001', False),
('>10', '>10', True),
('>10', '>10.000000001', True),
])
def test_isclose(self, a_str: str, b_str: str, expected_result: bool):
a = quant.from_string(a_str)
b = quant.from_string(b_str)
assert quant.isclose(a, b) == expected_result
assert quant.isclose(b, a) == expected_result
@pytest.mark.parametrize("a_str,b_str,expected_result", [
('10', '10', True),
('10', '20', False),
('0.001', '0.001', True),
('>10', '>10', True),
('<10', '<10', True),
('>10', '<10', False),
('10.0-10.5', '10.0-10.5', True),
('10', '10.0-10.5', False),
('10.5', '10.0-10.5', False),
('10.0-10.8', '10.0-10.5', False),
('10.2-10.5', '10.0-10.5', False),
])
def test_equality(self, a_str: str, b_str: str, expected_result: bool):
a = quant.from_string(a_str)
b = quant.from_string(b_str)
assert (a == b) == expected_result
@pytest.mark.parametrize("a_str,b_str,expected_result", [
('10', '20', False),
('20', '10', True),
('10', '10', False),
('>10', '20', None),
('>20', '10', True),
('>10', '10', True),
('<10', '20', False),
('<20', '10', None),
('<10', '10', False),
('>=10', '20', None),
('>=20', '10', True),
('>=10', '10', None),
('<=10', '20', False),
('<=20', '10', None),
('<=10', '10', False),
('10', '>20', False),
('20', '>10', None),
('10', '>10', False),
('10', '<20', None),
('20', '<10', True),
('10', '<10', True),
('10', '>=20', False),
('20', '>=10', None),
('10', '>=10', False),
('10', '<=20', None),
('20', '<=10', True),
('10', '<=10', None),
('<10', '<20', None),
('>10', '<20', None),
('<10', '>20', False),
('>10', '>20', None),
('<20', '<10', None),
('>20', '<10', True),
('<20', '>10', None),
('>20', '>10', None),
('<10', '<10', None),
('>10', '<10', True),
('<10', '>10', False),
('>10', '>10', None),
('<=10', '<=20', None),
('>=10', '<=20', None),
('<=10', '>=20', False),
('>=10', '>=20', None),
('<=20', '<=10', None),
('>=20', '<=10', True),
('<=20', '>=10', None),
('>=20', '>=10', None),
('<=10', '<=10', None),
('>=10', '<=10', None),
('<=10', '>=10', False),
('>=10', '>=10', None),
('20', '30-40', False),
('20', '20-30', False),
('20', '10-30', None),
('20', '10-20', None),
('20', '5-10', True),
('30-40', '20', True),
('20-30', '20', None),
('10-30', '20', None),
('10-20', '20', False),
('5-10', '20', False),
('10-20', '30-40', False),
('10-20', '20-30', False),
('10-20', '15-25', None),
('10-20', '10-20', None),
('10-20', '5-15', None),
('10-20', '5-10', None),
('10-20', '5-9', True),
])
def test_greater(self, a_str: str, b_str: str, expected_result: bool):
a = quant.from_string(a_str)
b = quant.from_string(b_str)
assert quant.greater(a, b) == expected_result
assert (a > b) == expected_result
@pytest.mark.parametrize("a_str,b_str,expected_result", [
('10', '20', False),
('20', '10', True),
('10', '10', True),
('>10', '20', None),
('>20', '10', True),
('>10', '10', True),
('<10', '20', False),
('<20', '10', None),
('<10', '10', False),
('>=10', '20', None),
('>=20', '10', True),
('>=10', '10', True),
('<=10', '20', False),
('<=20', '10', None),
('<=10', '10', None),
('10', '>20', False),
('20', '>10', None),
('10', '>10', False),
('10', '<20', None),
('20', '<10', True),
('10', '<10', True),
('10', '>=20', False),
('20', '>=10', None),
('10', '>=10', None),
('10', '<=20', None),
('20', '<=10', True),
('10', '<=10', True),
('<10', '<20', None),
('>10', '<20', None),
('<10', '>20', False),
('>10', '>20', None),
('<20', '<10', None),
('>20', '<10', True),
('<20', '>10', None),
('>20', '>10', None),
('<10', '<10', None),
('>10', '<10', True),
('<10', '>10', False),
('>10', '>10', None),
('<=10', '<=20', None),
('>=10', '<=20', None),
('<=10', '>=20', False),
('>=10', '>=20', None),
('<=20', '<=10', None),
('>=20', '<=10', True),
('<=20', '>=10', None),
('>=20', '>=10', None),
('<=10', '<=10', None),
('>=10', '<=10', True),
('<=10', '>=10', None),
('>=10', '>=10', None),
('20', '30-40', False),
('20', '20-30', None),
('20', '10-30', None),
('20', '10-20', True),
('20', '5-10', True),
('30-40', '20', True),
('20-30', '20', True),
('10-30', '20', None),
('10-20', '20', None),
('5-10', '20', False),
('10-20', '30-40', False),
('10-20', '20-30', None),
('10-20', '15-25', None),
('10-20', '10-20', None),
('10-20', '5-15', None),
('10-20', '5-10', True),
('10-20', '5-9', True),
])
def test_greater_equal(self, a_str: str, b_str: str,
expected_result: bool):
a = quant.from_string(a_str)
b = quant.from_string(b_str)
assert quant.greater_equal(a, b) == expected_result
assert (a >= b) == expected_result
@pytest.mark.parametrize("a_str,result_str", [
('100', '2'),
('0.01', '-2'),
('>100', '>2'),
('<100', '<2'),
('>=100', '>=2'),
('<=100', '<=2'),
('~100', '~2'),
('100-1000', '2-3'),
])
def test_log10(self, a_str: str, result_str: str):
a = quant.from_string(a_str)
result = quant.from_string(result_str)
assert quant.isclose(quant.log10(a), result)
@pytest.mark.parametrize("a_str,b_str,expected_result", [
('10', '20', True),
('20', '10', False),
('10', '10', False),
('>10', '20', None),
('>20', '10', False),
('>10', '10', False),
('<10', '20', True),
('<20', '10', None),
('<10', '10', True),
('>=10', '20', None),
('>=20', '10', False),
('>=10', '10', False),
('<=10', '20', True),
('<=20', '10', None),
('<=10', '10', None),
('10', '>20', True),
('20', '>10', None),
('10', '>10', True),
('10', '<20', None),
('20', '<10', False),
('10', '<10', False),
('10', '>=20', True),
('20', '>=10', None),
('10', '>=10', None),
('10', '<=20', None),
('20', '<=10', False),
('10', '<=10', False),
('<10', '<20', None),
('>10', '<20', None),
('<10', '>20', True),
('>10', '>20', None),
('<20', '<10', None),
('>20', '<10', False),
('<20', '>10', None),
('>20', '>10', None),
('<10', '<10', None),
('>10', '<10', False),
('<10', '>10', True),
('>10', '>10', None),
('<=10', '<=20', None),
('>=10', '<=20', None),
('<=10', '>=20', True),
('>=10', '>=20', None),
('<=20', '<=10', None),
('>=20', '<=10', False),
('<=20', '>=10', None),
('>=20', '>=10', None),
('<=10', '<=10', None),
('>=10', '<=10', False),
('<=10', '>=10', None),
('>=10', '>=10', None),
('20', '30-40', True),
('20', '20-30', None),
('20', '10-30', None),
('20', '10-20', False),
('20', '5-10', False),
('30-40', '20', False),
('20-30', '20', False),
('10-30', '20', None),
('10-20', '20', None),
('5-10', '20', True),
('10-20', '30-40', True),
('10-20', '20-30', None),
('10-20', '15-25', None),
('10-20', '10-20', None),
('10-20', '5-15', None),
('10-20', '5-10', False),
('10-20', '5-9', False),
])
def test_less(self, a_str: str, b_str: str, expected_result: bool):
a = quant.from_string(a_str)
b = quant.from_string(b_str)
assert quant.less(a, b) == expected_result
assert (a < b) == expected_result
@pytest.mark.parametrize("a_str,b_str,expected_result", [
('10', '20', True),
('20', '10', False),
('10', '10', True),
('>10', '20', None),
('>20', '10', False),
('>10', '10', False),
('<10', '20', True),
('<20', '10', None),
('<10', '10', True),
('>=10', '20', None),
('>=20', '10', False),
('>=10', '10', None),
('<=10', '20', True),
('<=20', '10', None),
('<=10', '10', True),
('10', '>20', True),
('20', '>10', None),
('10', '>10', True),
('10', '<20', None),
('20', '<10', False),
('10', '<10', False),
('10', '>=20', True),
('20', '>=10', None),
('10', '>=10', True),
('10', '<=20', None),
('20', '<=10', False),
('10', '<=10', None),
('<10', '<20', None),
('>10', '<20', None),
('<10', '>20', True),
('>10', '>20', None),
('<20', '<10', None),
('>20', '<10', False),
('<20', '>10', None),
('>20', '>10', None),
('<10', '<10', None),
('>10', '<10', False),
('<10', '>10', True),
('>10', '>10', None),
('<=10', '<=20', None),
('>=10', '<=20', None),
('<=10', '>=20', True),
('>=10', '>=20', None),
('<=20', '<=10', None),
('>=20', '<=10', False),
('<=20', '>=10', None),
('>=20', '>=10', None),
('<=10', '<=10', None),
('>=10', '<=10', None),
('<=10', '>=10', True),
('>=10', '>=10', None),
('20', '30-40', True),
('20', '20-30', True),
('20', '10-30', None),
('20', '10-20', None),
('20', '5-10', False),
('30-40', '20', False),
('20-30', '20', None),
('10-30', '20', None),
('10-20', '20', True),
('5-10', '20', True),
('10-20', '30-40', True),
('10-20', '20-30', True),
('10-20', '15-25', None),
('10-20', '10-20', None),
('10-20', '5-15', None),
('10-20', '5-10', None),
('10-20', '5-9', False),
])
def test_less_equal(self, a_str: str, b_str: str, expected_result: bool):
a = quant.from_string(a_str)
b = quant.from_string(b_str)
assert quant.less_equal(a, b) == expected_result
assert (a <= b) == expected_result
@pytest.mark.parametrize("a_str,b_str", [
('10', '-10'),
('0.001', '-0.001'),
('>10', '<-10'),
('<10', '>-10'),
('>=10', '<=-10'),
('<=10', '>=-10'),
('~10', '~-10'),
('2-3', '-3--2'),
])
def test_neg(self, a_str: str, b_str: str):
a = quant.from_string(a_str)
b = quant.from_string(b_str)
assert quant.isclose(quant.neg(a), b)
assert quant.isclose(-a, b)
assert quant.isclose(quant.neg(b), a)
assert quant.isclose(-b, a)
@pytest.mark.parametrize("a,b_str,result_str", [
(2.0, '2', '4'),
(3.0, '2', '9'),
(10.0, '2', '100'),
(10.0, '-2', '0.01'),
(10.0, '>2', '>100'),
(10.0, '<2', '<100'),
(10.0, '>=2', '>=100'),
(10.0, '<=2', '<=100'),
(10.0, '~2', '~100'),
(10.0, '2-3', '100-1000'),
])
def test_pow(self, a: float, b_str: str, result_str: str):
b = quant.from_string(b_str)
result = quant.from_string(result_str)
assert quant.isclose(quant.pow(a, b), result)
@pytest.mark.parametrize("a_str,b_str", [
('10', '10'),
('0.001', '0.001'),
('>10', '>10'),
('<10', '<10'),
('>=10', '>=10'),
('<=10', '<=10'),
('~10', '~10'),
('2-3', '2-3'),
])
def test_pos(self, a_str: str, b_str: str):
a = quant.from_string(a_str)
b = quant.from_string(b_str)
assert quant.isclose(+a, b)
@pytest.mark.parametrize("a_str,b_str,expected_result_str", [
('30', '20', '10'),
('10', '-20', '30'),
('0.003', '0.002', '0.001'),
('>10', '5', '>5'),
('10', '>5', '<5'),
('>10', '<5', '>5'),
('<10', '5', '<5'),
('10', '<5', '>5'),
('<10', '>5', '<5'),
('>=10', '5', '>=5'),
('10', '>=5', '<=5'),
('>=10', '<=5', '>=5'),
('>=10', '<5', '>5'),
('<=10', '5', '<=5'),
('10', '<=5', '>=5'),
('<=10', '>=5', '<=5'),
('<=10', '>5', '<5'),
('~10', '5', '~5'),
('~10', '~5', '~5'),
('10', '2-3', '7-8'),
('12-13', '10', '2-3'),
('10-20', '2-3', '7-18'),
('>10', '2-3', '>7'),
('<10', '2-3', '<8'),
('>=10', '2-3', '>=7'),
('<=10', '2-3', '<=8'),
])
def test_sub(self, a_str: str, b_str: str, expected_result_str: str):
a = quant.from_string(a_str)
b = quant.from_string(b_str)
expected_result = quant.from_string(expected_result_str)
assert quant.isclose(quant.sub(a, b), expected_result)
assert quant.isclose(a - b, expected_result)
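The None results in the comparison tables above encode three-valued logic: a quantity is only greater (or less) than another when their possible ranges cannot overlap. A minimal sketch of that rule (names like interval_greater and a_lo are mine; chemmltoolkit's real implementation may differ) is:
def interval_greater(a_lo, a_hi, b_lo, b_hi):
    """Three-valued '>' for interval-like quantities: True/False when
    certain, None when the ranges overlap and the answer is indeterminate."""
    if a_lo > b_hi:
        return True
    if a_hi <= b_lo:
        return False
    return None

assert interval_greater(30, 40, 10, 20) is True    # '30-40' > '10-20'
assert interval_greater(10, 20, 30, 40) is False   # '10-20' > '30-40'
assert interval_greater(10, 20, 15, 25) is None    # overlapping: unknown
A scalar such as '20' behaves as the degenerate interval [20, 20], which reproduces the ('20', '10-20', None) and ('10-20', '20', False) rows above.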
| 31.144788
| 77
| 0.365214
| 1,953
| 16,133
| 2.9319
| 0.034306
| 0.091512
| 0.069857
| 0.062871
| 0.919141
| 0.892595
| 0.871638
| 0.852428
| 0.830772
| 0.816975
| 0
| 0.177849
| 0.325606
| 16,133
| 517
| 78
| 31.205029
| 0.348438
| 0
| 0
| 0.658487
| 0
| 0
| 0.189425
| 0.013885
| 0
| 0
| 0
| 0
| 0.05317
| 1
| 0.026585
| false
| 0
| 0.00409
| 0
| 0.03272
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
db9002f17d84ed293e7edaab4feb176eab770dd3
| 3,290
|
py
|
Python
|
api/python/indigo/salts.py
|
tsingdao-Tp/Indigo
|
b2d73faebb6a450e9b3d34fed553fad4f9d0012f
|
[
"Apache-2.0"
] | null | null | null |
api/python/indigo/salts.py
|
tsingdao-Tp/Indigo
|
b2d73faebb6a450e9b3d34fed553fad4f9d0012f
|
[
"Apache-2.0"
] | null | null | null |
api/python/indigo/salts.py
|
tsingdao-Tp/Indigo
|
b2d73faebb6a450e9b3d34fed553fad4f9d0012f
|
[
"Apache-2.0"
] | null | null | null |
SALTS = [
"[NH4+]", "[Li+]", "[Na+]", "[K+]", "[Mg+2]", "[Ca+2]", "[Al+3]",
"[Fe+2]", "[Fe+3]", "[Co+2]", "[Ni+2]", "[Cu+2]", "[Zn+2]", "[Ag+]",
"[F-]", "[Cl-]", "[Br-]", "[I-]", "[S-2]", "[C-]#N",
"N(=O)[O-]", "[N+](=O)([O-])[O-]", "[O-]P(=O)([O-])[O-]",
"[O-][As](=O)([O-])[O-]", "[O-]S(=O)[O-]", "[O-]S(=O)(=O)[O-]",
"C(=O)([O-])[O-]", "[O-][Si](=O)[O-]", "[O-][Cr](=O)(=O)[O-]",
"[O-][Cr](=O)(=O)O[Cr](=O)(=O)[O-]", "[O-][Mn](=O)(=O)=O", "[O-]Cl",
"[O-]Cl=O", "[O-]Cl(=O)=O", "[O-]Cl(=O)(=O)=O",
"C(=O)[O-]", "CC(=O)[O-]", "CCC(=O)[O-]", "CCCC(=O)[O-]", "CCCCC(=O)[O-]",
"CCCCCCCCCCCCCCCC(=O)[O-]", "CCCCCCCCCCCCCCCCCC(=O)[O-]",
"C(C(=O)[O-])Cl", "C(C(=O)[O-])(Cl)Cl", "C(=O)(C(Cl)(Cl)Cl)[O-]",
"C=CC(=O)[O-]", "CC(=C)C(=O)[O-]", "CCCCCCCC/C=C\CCCCCCCC(=O)[O-]",
"CCCCC/C=C\C/C=C\CCCCCCCC(=O)[O-]", "CC/C=C\C/C=C\C/C=C\CCCCCCCC(=O)[O-]",
"C/C=C/C(=O)[O-]", "C1=CC=C(C=C1)C(=O)[O-]", "C1=CC=C(C=C1)CC(=O)[O-]",
"C1=CC=C(C=C1)CC(=O)[O-]", "C(=O)(C(=O)[O-])[O-]",
"C(CCC(=O)[O-])CC(=O)[O-]", "C(CCCC(=O)[O-])CCCC(=O)[O-]",
"C(C(=O)[O-])C(=O)[O-]", "C1=CC(=CC=C1C(=O)[O-])C(=O)[O-]",
"CC(C(=O)[O-])O", "C(C(C(=O)[O-])O)(C(=O)[O-])O",
"C(C(=O)[O-])C(CC(=O)[O-])(C(=O)[O-])O", "C(CC(=O)[O-])C(=O)[O-]",
"C([C@H]([C@H]([C@@H]([C@H](C(=O)[O-])O)O)O)O)O",
"C1=CC=C(C(=C1)C(=O)O)[O-]", "CC(=O)OC1=CC=CC=C1C(=O)[O-]"
]
BASIC_METALS = [
"[Li]", "[Na]", "[K]", "[Mg]", "[Ca]", "[Al]", "[Fe]", "[Co]", "[Ni]",
"[Cu]", "[Zn]", "[Ag]"
]
INORGANIC_CATIONS = [
"[NH4+]", "[Li+]", "[Na+]", "[K+]", "[Mg+2]", "[Ca+2]", "[Al+3]",
"[Fe+2]", "[Fe+3]", "[Co+2]", "[Ni+2]", "[Cu+2]", "[Zn+2]", "[Ag+]"
]
HYDRACIDS_ANIONS = [
"[F-]", "[Cl-]", "[Br-]", "[I-]", "[S-2]", "[C-]#N"
]
INORGANIC_OXYACIDS_ANIONS = [
"N(=O)[O-]", "[N+](=O)([O-])[O-]", "[O-]P(=O)([O-])[O-]",
"[O-][As](=O)([O-])[O-]", "[O-]S(=O)[O-]", "[O-]S(=O)(=O)[O-]",
"C(=O)([O-])[O-]", "[O-][Si](=O)[O-]", "[O-][Cr](=O)(=O)[O-] ",
"[O-][Cr](=O)(=O)O[Cr](=O)(=O)[O-]", "[O-][Mn](=O)(=O)=O", "[O-]Cl",
"[[O-]Cl=O]", "[O-]Cl(=O)=O", "[O-]Cl(=O)(=O)=O"
]
ORGANIC_OXYACIDS_ANIONS = [
# acyclic monocarboxylic saturated acids and their halogen derivatives
"C(=O)[O-]", "CC(=O)[O-]", "CCC(=O)[O-]", "CCCC(=O)[O-]", "CCCCC(=O)[O-]",
"CCCCCCCCCCCCCCCC(=O)[O-]", "CCCCCCCCCCCCCCCCCC(=O)[O-]",
"C(C(=O)[O-])Cl", "C(C(=O)[O-])(Cl)Cl", "C(=O)(C(Cl)(Cl)Cl)[O-]",
# acyclic monocarboxylic unsaturated acids, cyclic acids
# and their halogen derivatives
"C=CC(=O)[O-]", "CC(=C)C(=O)[O-]", r"CCCCCCCC/C=C\CCCCCCCC(=O)[O-]",
r"CCCCC/C=C\C/C=C\CCCCCCCC(=O)[O-]", r"CC/C=C\C/C=C\C/C=C\CCCCCCCC(=O)[O-]",
"C/C=C/C(=O)[O-]", "C1=CC=C(C=C1)C(=O)[O-]", "C1=CC=C(C=C1)CC(=O)[O-]",
"C1=CC=C(C=C1)CC(=O)[O-]",
# polycarboxylic acids and their halogen derivatives
"C(=O)(C(=O)[O-])[O-]", "C(CCC(=O)[O-])CC(=O)[O-]",
"C(CCCC(=O)[O-])CCCC(=O)[O-]", "C(C(=O)[O-])C(=O)[O-]",
"C1=CC(=CC=C1C(=O)[O-])C(=O)[O-]",
# acids with additional functional group
"CC(C(=O)[O-])O", "C(C(C(=O)[O-])O)(C(=O)[O-])O",
"C(C(=O)[O-])C(CC(=O)[O-])(C(=O)[O-])O", "C(CC(=O)[O-])C(=O)[O-]",
"C([C@H]([C@H]([C@@H]([C@H](C(=O)[O-])O)O)O)O)O",
"C1=CC=C(C(=C1)C(=O)O)[O-]", "CC(=O)OC1=CC=CC=C1C(=O)[O-]"
]
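These constants are plain SMILES strings, so one illustrative use (not part of salts.py) is stripping known counter-ions from a dot-disconnected SMILES by component membership. This assumes exact string equality is acceptable; production code would canonicalize each component with a cheminformatics toolkit first, since textual equality is not chemical equality:

def strip_salts(smiles: str) -> str:
    # Keep only the components that are not listed as known salt counter-ions.
    return '.'.join(c for c in smiles.split('.') if c not in SALTS)

assert strip_salts('CCO.[Na+].[Cl-]') == 'CCO'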
| 48.382353
| 78
| 0.375988
| 671
| 3,290
| 1.833085
| 0.09389
| 0.273171
| 0.15122
| 0.071545
| 0.808943
| 0.808943
| 0.808943
| 0.785366
| 0.721951
| 0.721951
| 0
| 0.015961
| 0.124012
| 3,290
| 67
| 79
| 49.104478
| 0.410826
| 0.072948
| 0
| 0.526316
| 0
| 0.105263
| 0.697766
| 0.380421
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
dba821a1013bb0ef2331a426e7ac43ed2a5d6df8
| 8,141
|
py
|
Python
|
test/test_config_regex.py
|
huggingface/neural-compressor
|
aaad4c357a86914ffa583753c9a26d949838a2a5
|
[
"Apache-2.0"
] | 172
|
2021-09-14T18:34:17.000Z
|
2022-03-30T06:49:53.000Z
|
test/test_config_regex.py
|
intel/lp-opt-tool
|
130eefa3586b38df6c0ff78cc8807ae273f6a63f
|
[
"Apache-2.0"
] | 40
|
2021-09-14T02:26:12.000Z
|
2022-03-29T08:34:04.000Z
|
test/test_config_regex.py
|
intel/neural-compressor
|
16a4a12045fcb468da4d33769aff2c1a5e2ba6ba
|
[
"Apache-2.0"
] | 33
|
2021-09-15T07:27:25.000Z
|
2022-03-25T08:30:57.000Z
|
#
# -*- coding: utf-8 -*-
#
import os
import unittest
import tensorflow as tf
from tensorflow.python.framework import graph_util
from neural_compressor.adaptor.tf_utils.util import disable_random
def build_fake_yaml():
fake_yaml = '''
model:
name: fake_yaml
framework: tensorflow
inputs: input
outputs: op_to_store
device: cpu
quantization:
op_wise: {
\"conv1_[1-2]\": {
\"activation\": {\"dtype\": [\"fp32\"]},
},
}
evaluation:
accuracy:
metric:
topk: 1
tuning:
strategy:
name: basic
exit_policy:
timeout: 0
accuracy_criterion:
relative: 0.05
exit_policy:
performance_only: True
workspace:
path: saved
'''
with open('fake_yaml.yaml', "w", encoding="utf-8") as f:
f.write(fake_yaml)
def build_fake_yaml_invalid_model_wise():
fake_yaml = '''
model:
name: fake_yaml
framework: tensorflow
inputs: input
outputs: op_to_store
device: cpu
quantization:
op_wise: {
\"conv1_[1-2]\": {
\"activation\": {\"dtype\": [\"fp32\"]},
},
}
model_wise:
weight:
granularity: per_channel
scheme: sym
dtype: int8
algorithm: minmax
evaluation:
accuracy:
metric:
topk: 1
tuning:
strategy:
name: basic
exit_policy:
timeout: 0
accuracy_criterion:
relative: 0.05
workspace:
path: saved
'''
with open('fake_yaml_with_invalid_cfg.yaml', "w", encoding="utf-8") as f:
f.write(fake_yaml)
class TestConfigRegex(unittest.TestCase):
@classmethod
def setUpClass(cls):
build_fake_yaml()
build_fake_yaml_invalid_model_wise()
@classmethod
def tearDownClass(cls):
os.remove('fake_yaml.yaml')
os.remove('fake_yaml_with_invalid_cfg.yaml')
@disable_random()
def test_config_regex(self):
x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input")
top_relu = tf.nn.relu(x)
paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])
x_pad = tf.pad(top_relu, paddings, "CONSTANT")
conv_weights = tf.compat.v1.get_variable("weight", [3, 3, 16, 16],
initializer=tf.compat.v1.random_normal_initializer())
conv_weights_2 = tf.compat.v1.get_variable("weight_2", [3, 8, 16, 16],
initializer=tf.compat.v1.random_normal_initializer())
conv = tf.nn.conv2d(x_pad, conv_weights, strides=[
1, 2, 2, 1], padding="VALID", name='conv1_1')
normed1 = tf.compat.v1.layers.batch_normalization(conv)
relu = tf.nn.relu(normed1)
max_pool = tf.nn.max_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding="SAME")
conv_bias = tf.compat.v1.get_variable("bias", [16],
initializer=tf.compat.v1.random_normal_initializer())
conv_1 = tf.nn.conv2d(max_pool, conv_weights_2, strides=[
1, 2, 2, 1], padding="VALID", name='conv1_3')
conv_bias = tf.math.add(conv_1, conv_bias)
relu6 = tf.nn.relu6(conv_bias, name='op_to_store')
out_name = relu6.name.split(':')[0]
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
output_graph_def = graph_util.convert_variables_to_constants(
sess=sess,
input_graph_def=sess.graph_def,
output_node_names=[out_name])
for i in output_graph_def.node:
if i.op.find('Add') != -1:
i.op = 'Add'
from neural_compressor.experimental import Quantization, common
quantizer = Quantization('fake_yaml.yaml')
dataset = quantizer.dataset('dummy', shape=(100, 56, 56, 16), label=True)
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_dataloader = common.DataLoader(dataset)
quantizer.model = output_graph_def
output_graph = quantizer.fit()
found_fp32_conv = False
found_quantized_conv = False
for i in output_graph.graph_def.node:
if i.op == 'Conv2D' and i.name == 'conv1_1':
found_fp32_conv = True
if i.op.find("QuantizedConv2D") != -1 and i.name == 'conv1_3_eightbit_requantize':
found_quantized_conv = True
self.assertEqual(found_fp32_conv, True)
self.assertEqual(found_quantized_conv, True)
def test_config_regex_with_invalid_cfg(self):
tf.compat.v1.disable_eager_execution()
tf.compat.v1.reset_default_graph()
tf.compat.v1.set_random_seed(1)
x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input")
top_relu = tf.nn.relu(x)
paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])
x_pad = tf.pad(top_relu, paddings, "CONSTANT")
conv_weights = tf.compat.v1.get_variable("weight", [3, 3, 16, 16],
initializer=tf.compat.v1.random_normal_initializer())
conv_weights_2 = tf.compat.v1.get_variable("weight_2", [3, 8, 16, 16],
initializer=tf.compat.v1.random_normal_initializer())
conv = tf.nn.conv2d(x_pad, conv_weights, strides=[
1, 2, 2, 1], padding="VALID", name='conv1_1')
normed1 = tf.compat.v1.layers.batch_normalization(conv)
relu = tf.nn.relu(normed1)
max_pool = tf.nn.max_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding="SAME")
conv_bias = tf.compat.v1.get_variable("bias", [16],
initializer=tf.compat.v1.random_normal_initializer())
conv_1 = tf.nn.conv2d(max_pool, conv_weights_2, strides=[
1, 2, 2, 1], padding="VALID", name='conv1_3')
conv_bias = tf.math.add(conv_1, conv_bias)
relu6 = tf.nn.relu6(conv_bias, name='op_to_store')
out_name = relu6.name.split(':')[0]
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
output_graph_def = graph_util.convert_variables_to_constants(
sess=sess,
input_graph_def=sess.graph_def,
output_node_names=[out_name])
for i in output_graph_def.node:
if i.op.find('Add') != -1:
i.op = 'Add'
from neural_compressor.experimental import Quantization, common
quantizer = Quantization('fake_yaml_with_invalid_cfg.yaml')
dataset = quantizer.dataset('dummy', shape=(100, 56, 56, 16), label=True)
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_dataloader = common.DataLoader(dataset)
quantizer.model = output_graph_def
output_graph = quantizer.fit()
found_fp32_conv = False
found_quantized_conv = False
for i in output_graph.graph_def.node:
if i.op == 'Conv2D' and i.name == 'conv1_1':
found_fp32_conv = True
if i.op.find("QuantizedConv2D") != -1 and i.name == 'conv1_3_eightbit_requantize':
found_quantized_conv = True
self.assertEqual(found_fp32_conv, True)
self.assertEqual(found_quantized_conv, True)
if __name__ == '__main__':
unittest.main()
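Both tests hinge on the op_wise key 'conv1_[1-2]' acting as a pattern over op names, so 'conv1_1' is pinned to fp32 while 'conv1_3' remains eligible for quantization. How neural_compressor applies the pattern internally is not shown in this file; a minimal sketch of the matching the assertions rely on:

import re

pattern = re.compile('conv1_[1-2]')
assert pattern.fullmatch('conv1_1') is not None  # forced to fp32 by op_wise
assert pattern.fullmatch('conv1_3') is None      # left quantizable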
| 38.582938
| 104
| 0.546862
| 943
| 8,141
| 4.480382
| 0.181336
| 0.04355
| 0.054438
| 0.018462
| 0.862485
| 0.862485
| 0.835976
| 0.819882
| 0.819882
| 0.819882
| 0
| 0.040268
| 0.341113
| 8,141
| 210
| 105
| 38.766667
| 0.74739
| 0.00258
| 0
| 0.796703
| 0
| 0
| 0.2435
| 0.018115
| 0
| 0
| 0
| 0
| 0.021978
| 1
| 0.032967
| false
| 0
| 0.038462
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
dbbb03cd1978ab396ed07823d073966aae64cfa6
| 5,286
|
py
|
Python
|
Incident-Response/Tools/dfirtrack/dfirtrack_main/tests/taskpriority/test_taskpriority_views.py
|
sn0b4ll/Incident-Playbook
|
cf519f58fcd4255674662b3620ea97c1091c1efb
|
[
"MIT"
] | 1
|
2021-07-24T17:22:50.000Z
|
2021-07-24T17:22:50.000Z
|
Incident-Response/Tools/dfirtrack/dfirtrack_main/tests/taskpriority/test_taskpriority_views.py
|
sn0b4ll/Incident-Playbook
|
cf519f58fcd4255674662b3620ea97c1091c1efb
|
[
"MIT"
] | 2
|
2022-02-28T03:40:31.000Z
|
2022-02-28T03:40:52.000Z
|
Incident-Response/Tools/dfirtrack/dfirtrack_main/tests/taskpriority/test_taskpriority_views.py
|
sn0b4ll/Incident-Playbook
|
cf519f58fcd4255674662b3620ea97c1091c1efb
|
[
"MIT"
] | 2
|
2022-02-25T08:34:51.000Z
|
2022-03-16T17:29:44.000Z
|
from django.contrib.auth.models import User
from django.test import TestCase
from dfirtrack_main.models import Taskpriority
import urllib.parse
class TaskpriorityViewTestCase(TestCase):
""" taskpriority view tests """
@classmethod
def setUpTestData(cls):
# create object
Taskpriority.objects.create(taskpriority_name='prio_1')
# create user
User.objects.create_user(username='testuser_taskpriority', password='VxuP85UUDkfXwRuwRFqA')
def test_taskpriority_list_not_logged_in(self):
""" test list view """
# create url
destination = '/login/?next=' + urllib.parse.quote('/taskpriority/', safe='')
# get response
response = self.client.get('/taskpriority/', follow=True)
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
def test_taskpriority_list_logged_in(self):
""" test list view """
# login testuser
self.client.login(username='testuser_taskpriority', password='VxuP85UUDkfXwRuwRFqA')
# get response
response = self.client.get('/taskpriority/')
# compare
self.assertEqual(response.status_code, 200)
def test_taskpriority_list_template(self):
""" test list view """
# login testuser
self.client.login(username='testuser_taskpriority', password='VxuP85UUDkfXwRuwRFqA')
# get response
response = self.client.get('/taskpriority/')
# compare
self.assertTemplateUsed(response, 'dfirtrack_main/taskpriority/taskpriority_list.html')
def test_taskpriority_list_get_user_context(self):
""" test list view """
# login testuser
self.client.login(username='testuser_taskpriority', password='VxuP85UUDkfXwRuwRFqA')
# get response
response = self.client.get('/taskpriority/')
# compare
self.assertEqual(str(response.context['user']), 'testuser_taskpriority')
def test_taskpriority_list_redirect(self):
""" test list view """
# login testuser
self.client.login(username='testuser_taskpriority', password='VxuP85UUDkfXwRuwRFqA')
# create url
destination = urllib.parse.quote('/taskpriority/', safe='/')
# get response
response = self.client.get('/taskpriority', follow=True)
# compare
self.assertRedirects(response, destination, status_code=301, target_status_code=200)
def test_taskpriority_detail_not_logged_in(self):
""" test detail view """
# get object
taskpriority_1 = Taskpriority.objects.get(taskpriority_name='prio_1')
# create url
destination = '/login/?next=' + urllib.parse.quote('/taskpriority/' + str(taskpriority_1.taskpriority_id) + '/', safe='')
# get response
response = self.client.get('/taskpriority/' + str(taskpriority_1.taskpriority_id) + '/', follow=True)
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
def test_taskpriority_detail_logged_in(self):
""" test detail view """
# get object
taskpriority_1 = Taskpriority.objects.get(taskpriority_name='prio_1')
# login testuser
self.client.login(username='testuser_taskpriority', password='VxuP85UUDkfXwRuwRFqA')
# get response
response = self.client.get('/taskpriority/' + str(taskpriority_1.taskpriority_id) + '/')
# compare
self.assertEqual(response.status_code, 200)
def test_taskpriority_detail_template(self):
""" test detail view """
# get object
taskpriority_1 = Taskpriority.objects.get(taskpriority_name='prio_1')
# login testuser
self.client.login(username='testuser_taskpriority', password='VxuP85UUDkfXwRuwRFqA')
# get response
response = self.client.get('/taskpriority/' + str(taskpriority_1.taskpriority_id) + '/')
# compare
self.assertTemplateUsed(response, 'dfirtrack_main/taskpriority/taskpriority_detail.html')
def test_taskpriority_detail_get_user_context(self):
""" test detail view """
# get object
taskpriority_1 = Taskpriority.objects.get(taskpriority_name='prio_1')
# login testuser
self.client.login(username='testuser_taskpriority', password='VxuP85UUDkfXwRuwRFqA')
# get response
response = self.client.get('/taskpriority/' + str(taskpriority_1.taskpriority_id) + '/')
# compare
self.assertEqual(str(response.context['user']), 'testuser_taskpriority')
def test_taskpriority_detail_redirect(self):
""" test detail view """
# get object
taskpriority_1 = Taskpriority.objects.get(taskpriority_name='prio_1')
# login testuser
self.client.login(username='testuser_taskpriority', password='VxuP85UUDkfXwRuwRFqA')
# create url
destination = urllib.parse.quote('/taskpriority/' + str(taskpriority_1.taskpriority_id) + '/', safe='/')
# get response
response = self.client.get('/taskpriority/' + str(taskpriority_1.taskpriority_id), follow=True)
# compare
self.assertRedirects(response, destination, status_code=301, target_status_code=200)
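A note on the expected destinations above (a sketch, not part of the test file): the "not logged in" cases encode the original path into the ?next= query parameter with every character escaped, while the trailing-slash redirect keeps '/' literal via safe='/':

import urllib.parse

assert urllib.parse.quote('/taskpriority/', safe='') == '%2Ftaskpriority%2F'
assert urllib.parse.quote('/taskpriority/', safe='/') == '/taskpriority/'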
| 40.661538
| 129
| 0.674801
| 535
| 5,286
| 6.476636
| 0.121495
| 0.051948
| 0.08658
| 0.066378
| 0.883405
| 0.849639
| 0.840404
| 0.836364
| 0.791053
| 0.774315
| 0
| 0.01585
| 0.212259
| 5,286
| 129
| 130
| 40.976744
| 0.816282
| 0.122966
| 0
| 0.482143
| 0
| 0
| 0.173472
| 0.073494
| 0
| 0
| 0
| 0
| 0.178571
| 1
| 0.196429
| false
| 0.160714
| 0.071429
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
53187215da77a0ac40dc6f5fb88edfdf4e593949
| 70,277
|
py
|
Python
|
src/oci/management_dashboard/dashx_apis_client.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/management_dashboard/dashx_apis_client.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/management_dashboard/dashx_apis_client.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from __future__ import absolute_import
from oci._vendor import requests # noqa: F401
from oci._vendor import six
from oci import retry # noqa: F401
from oci.base_client import BaseClient
from oci.config import get_config_value_or_default, validate_config
from oci.signer import Signer
from oci.util import Sentinel, get_signer_from_authentication_type, AUTHENTICATION_TYPE_FIELD_NAME
from .models import management_dashboard_type_mapping
missing = Sentinel("Missing")
class DashxApisClient(object):
"""
API for the Management Dashboard micro-service. Use this API for dashboard and saved search metadata preservation and to perform tasks such as creating a dashboard, creating a saved search, and obtaining a list of dashboards and saved searches in a compartment.
"""
def __init__(self, config, **kwargs):
"""
Creates a new service client
:param dict config:
Configuration keys and values as per `SDK and Tool Configuration <https://docs.cloud.oracle.com/Content/API/Concepts/sdkconfig.htm>`__.
The :py:meth:`~oci.config.from_file` method can be used to load configuration from a file. Alternatively, a ``dict`` can be passed. You can validate
the dict using :py:meth:`~oci.config.validate_config`.
:param str service_endpoint: (optional)
The endpoint of the service to call using this client. For example ``https://iaas.us-ashburn-1.oraclecloud.com``. If this keyword argument is
not provided then it will be derived using the region in the config parameter. You should only provide this keyword argument if you have an explicit
need to specify a service endpoint.
:param timeout: (optional)
The connection and read timeouts for the client. The default values are connection timeout 10 seconds and read timeout 60 seconds. This keyword argument can be provided
as a single float, in which case the value provided is used for both the read and connection timeouts, or as a tuple of two floats. If
a tuple is provided then the first value is used as the connection timeout and the second value as the read timeout.
:type timeout: float or tuple(float, float)
:param signer: (optional)
The signer to use when signing requests made by the service client. The default is to use a :py:class:`~oci.signer.Signer` based on the values
provided in the config parameter.
One use case for this parameter is for `Instance Principals authentication <https://docs.cloud.oracle.com/Content/Identity/Tasks/callingservicesfrominstances.htm>`__
by passing an instance of :py:class:`~oci.auth.signers.InstancePrincipalsSecurityTokenSigner` as the value for this keyword argument
:type signer: :py:class:`~oci.signer.AbstractBaseSigner`
:param obj retry_strategy: (optional)
A retry strategy to apply to all calls made by this service client (i.e. at the client level). There is no retry strategy applied by default.
Retry strategies can also be applied at the operation level by passing a ``retry_strategy`` keyword argument as part of calling the operation.
Any value provided at the operation level will override whatever is specified at the client level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
"""
validate_config(config, signer=kwargs.get('signer'))
if 'signer' in kwargs:
signer = kwargs['signer']
elif AUTHENTICATION_TYPE_FIELD_NAME in config:
signer = get_signer_from_authentication_type(config)
else:
signer = Signer(
tenancy=config["tenancy"],
user=config["user"],
fingerprint=config["fingerprint"],
private_key_file_location=config.get("key_file"),
pass_phrase=get_config_value_or_default(config, "pass_phrase"),
private_key_content=config.get("key_content")
)
base_client_init_kwargs = {
'regional_client': True,
'service_endpoint': kwargs.get('service_endpoint'),
'base_path': '/20200901',
'service_endpoint_template': 'https://managementdashboard.{region}.oci.{secondLevelDomain}',
'skip_deserialization': kwargs.get('skip_deserialization', False)
}
if 'timeout' in kwargs:
base_client_init_kwargs['timeout'] = kwargs.get('timeout')
self.base_client = BaseClient("dashx_apis", config, signer, management_dashboard_type_mapping, **base_client_init_kwargs)
self.retry_strategy = kwargs.get('retry_strategy')
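# Usage sketch (illustrative only, not part of this client). The docstring
# above implies construction from an SDK config dict plus optional per-call
# retry overrides; assuming a standard ~/.oci/config file, a minimal flow is:
#
#   import oci
#   config = oci.config.from_file()
#   client = oci.management_dashboard.DashxApisClient(config)
#   details = oci.management_dashboard.models.ChangeManagementDashboardsCompartmentDetails(
#       compartment_id='ocid1.compartment.oc1..exampleTargetId')  # hypothetical OCID
#   client.change_management_dashboards_compartment(
#       'ocid1.managementdashboard.oc1..exampleDashboardId', details,
#       retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY)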
def change_management_dashboards_compartment(self, management_dashboard_id, change_management_dashboards_compartment_details, **kwargs):
"""
Moves the dashboard from the existing compartment to a new compartment.
:param str management_dashboard_id: (required)
A unique dashboard identifier.
:param oci.management_dashboard.models.ChangeManagementDashboardsCompartmentDetails change_management_dashboards_compartment_details: (required)
ID of the dashboard that is being moved.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call
for a resource, set the `if-match` parameter to the value of the
etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you
provide matches the resource's current etag value.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or
server error without risk of executing that same action again. Retry tokens expire after 24
hours, but can be invalidated before then due to conflicting operations. For example, if a resource
has been deleted and purged from the system, then a retry of the original creation request
might be rejected.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.management_dashboard.models.ManagementDashboard`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/managementdashboard/change_management_dashboards_compartment.py.html>`__ to see an example of how to use change_management_dashboards_compartment API.
"""
resource_path = "/managementDashboards/{managementDashboardId}/actions/changeCompartment"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_retry_token",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"change_management_dashboards_compartment got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"managementDashboardId": management_dashboard_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_management_dashboards_compartment_details,
response_type="ManagementDashboard")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_management_dashboards_compartment_details,
response_type="ManagementDashboard")
def change_management_saved_searches_compartment(self, management_saved_search_id, change_management_saved_searches_compartment_details, **kwargs):
"""
Moves the saved search from the existing compartment to a new compartment.
:param str management_saved_search_id: (required)
A unique saved search identifier.
:param oci.management_dashboard.models.ChangeManagementSavedSearchesCompartmentDetails change_management_saved_searches_compartment_details: (required)
ID of the saved search that is being moved.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call
for a resource, set the `if-match` parameter to the value of the
etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you
provide matches the resource's current etag value.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or
server error without risk of executing that same action again. Retry tokens expire after 24
hours, but can be invalidated before then due to conflicting operations. For example, if a resource
has been deleted and purged from the system, then a retry of the original creation request
might be rejected.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.management_dashboard.models.ManagementSavedSearch`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/managementdashboard/change_management_saved_searches_compartment.py.html>`__ to see an example of how to use change_management_saved_searches_compartment API.
"""
resource_path = "/managementSavedSearches/{managementSavedSearchId}/actions/changeCompartment"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_retry_token",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"change_management_saved_searches_compartment got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"managementSavedSearchId": management_saved_search_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_management_saved_searches_compartment_details,
response_type="ManagementSavedSearch")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_management_saved_searches_compartment_details,
response_type="ManagementSavedSearch")
def create_management_dashboard(self, create_management_dashboard_details, **kwargs):
"""
Creates a new dashboard. The limit on the number of saved searches in a dashboard is 20. Here's an example of how you can use CLI to create a dashboard. For information on the details that must be passed to CREATE, you can use the GET API to obtain the Create.json file:
oci management-dashboard dashboard get --management-dashboard-id \"ocid1.managementdashboard.oc1..dashboardId1\" --query data > Create.json.
You can then modify the Create.json file by removing the\"id\" attribute and making other required changes, and use the oci management-dashboard dashboard create command.
:param oci.management_dashboard.models.CreateManagementDashboardDetails create_management_dashboard_details: (required)
JSON metadata for creating a new dashboard.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or
server error without risk of executing that same action again. Retry tokens expire after 24
hours, but can be invalidated before then due to conflicting operations. For example, if a resource
has been deleted and purged from the system, then a retry of the original creation request
might be rejected.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.management_dashboard.models.ManagementDashboard`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/managementdashboard/create_management_dashboard.py.html>`__ to see an example of how to use create_management_dashboard API.
"""
resource_path = "/managementDashboards"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_retry_token",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_management_dashboard got unknown kwargs: {!r}".format(extra_kwargs))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-retry-token": kwargs.get("opc_retry_token", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
header_params=header_params,
body=create_management_dashboard_details,
response_type="ManagementDashboard")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
header_params=header_params,
body=create_management_dashboard_details,
response_type="ManagementDashboard")
def create_management_saved_search(self, create_management_saved_search_details, **kwargs):
"""
Creates a new saved search. Here's an example of how you can use CLI to create a saved search. For information on the details that must be passed to CREATE, you can use the GET API to obtain the Create.json file:
oci management-dashboard saved-search get --management-saved-search-id ocid1.managementsavedsearch.oc1..savedsearchId1 --query data > Create.json.
You can then modify the Create.json file by removing the \"id\" attribute and making other required changes, and use the oci management-dashboard saved-search create command.
:param oci.management_dashboard.models.CreateManagementSavedSearchDetails create_management_saved_search_details: (required)
JSON metadata for the saved search.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or
server error without risk of executing that same action again. Retry tokens expire after 24
hours, but can be invalidated before then due to conflicting operations. For example, if a resource
has been deleted and purged from the system, then a retry of the original creation request
might be rejected.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.management_dashboard.models.ManagementSavedSearch`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/managementdashboard/create_management_saved_search.py.html>`__ to see an example of how to use create_management_saved_search API.
"""
resource_path = "/managementSavedSearches"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_retry_token",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_management_saved_search got unknown kwargs: {!r}".format(extra_kwargs))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-retry-token": kwargs.get("opc_retry_token", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
header_params=header_params,
body=create_management_saved_search_details,
response_type="ManagementSavedSearch")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
header_params=header_params,
body=create_management_saved_search_details,
response_type="ManagementSavedSearch")
def delete_management_dashboard(self, management_dashboard_id, **kwargs):
"""
Deletes a Dashboard by ID.
:param str management_dashboard_id: (required)
A unique dashboard identifier.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call
for a resource, set the `if-match` parameter to the value of the
etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you
provide matches the resource's current etag value.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/managementdashboard/delete_management_dashboard.py.html>`__ to see an example of how to use delete_management_dashboard API.
"""
resource_path = "/managementDashboards/{managementDashboardId}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_management_dashboard got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"managementDashboardId": management_dashboard_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
def delete_management_saved_search(self, management_saved_search_id, **kwargs):
"""
Deletes a saved search by ID.
:param str management_saved_search_id: (required)
A unique saved search identifier.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call
for a resource, set the `if-match` parameter to the value of the
etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you
provide matches the resource's current etag value.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/managementdashboard/delete_management_saved_search.py.html>`__ to see an example of how to use delete_management_saved_search API.
"""
resource_path = "/managementSavedSearches/{managementSavedSearchId}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_management_saved_search got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"managementSavedSearchId": management_saved_search_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
def export_dashboard(self, export_dashboard_id, **kwargs):
"""
Exports an array of dashboards and their saved searches. Export is designed to work with importDashboard. Here's an example of how you can use CLI to export a dashboard. $oci management-dashboard dashboard export --query data --export-dashboard-id \"{\\\"dashboardIds\\\":[\\\"ocid1.managementdashboard.oc1..dashboardId1\\\"]}\" > dashboards.json
:param str export_dashboard_id: (required)
List of dashboardIds in plain text. The syntax is '{\"dashboardIds\":[\"dashboardId1\", \"dashboardId2\", ...]}'. Escaping is needed when using in OCI CLI. For example, \"{\\\"dashboardIds\\\":[\\\"ocid1.managementdashboard.oc1..dashboardId1\\\"]}\" .
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or
server error without risk of executing that same action again. Retry tokens expire after 24
hours, but can be invalidated before then due to conflicting operations. For example, if a resource
has been deleted and purged from the system, then a retry of the original creation request
might be rejected.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.management_dashboard.models.ManagementDashboardExportDetails`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/managementdashboard/export_dashboard.py.html>`__ to see an example of how to use export_dashboard API.
"""
resource_path = "/managementDashboards/actions/exportDashboard/{exportDashboardId}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_retry_token",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"export_dashboard got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"exportDashboardId": export_dashboard_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-retry-token": kwargs.get("opc_retry_token", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="ManagementDashboardExportDetails")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="ManagementDashboardExportDetails")
def get_management_dashboard(self, management_dashboard_id, **kwargs):
"""
Gets a dashboard and its saved searches by ID. Deleted or unauthorized saved searches are marked by the tile's state property.
:param str management_dashboard_id: (required)
A unique dashboard identifier.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or
server error without risk of executing that same action again. Retry tokens expire after 24
hours, but can be invalidated before then due to conflicting operations. For example, if a resource
has been deleted and purged from the system, then a retry of the original creation request
might be rejected.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.management_dashboard.models.ManagementDashboard`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/managementdashboard/get_management_dashboard.py.html>`__ to see an example of how to use get_management_dashboard API.
"""
resource_path = "/managementDashboards/{managementDashboardId}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_retry_token",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_management_dashboard got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"managementDashboardId": management_dashboard_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-retry-token": kwargs.get("opc_retry_token", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="ManagementDashboard")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="ManagementDashboard")
def get_management_saved_search(self, management_saved_search_id, **kwargs):
"""
Gets a saved search by ID.
:param str management_saved_search_id: (required)
A unique saved search identifier.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or
server error without risk of executing that same action again. Retry tokens expire after 24
hours, but can be invalidated before then due to conflicting operations. For example, if a resource
has been deleted and purged from the system, then a retry of the original creation request
might be rejected.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.management_dashboard.models.ManagementSavedSearch`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/managementdashboard/get_management_saved_search.py.html>`__ to see an example of how to use get_management_saved_search API.
"""
resource_path = "/managementSavedSearches/{managementSavedSearchId}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_retry_token",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_management_saved_search got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"managementSavedSearchId": management_saved_search_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-retry-token": kwargs.get("opc_retry_token", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="ManagementSavedSearch")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="ManagementSavedSearch")
def import_dashboard(self, management_dashboard_import_details, **kwargs):
"""
Imports an array of dashboards and their saved searches. Here's an example of how you can use CLI to import a dashboard. For information on the details that must be passed to IMPORT, you can use the EXPORT API to obtain the Import.json file:
oci management-dashboard dashboard export --query data --export-dashboard-id \"{\\\"dashboardIds\\\":[\\\"ocid1.managementdashboard.oc1..dashboardId1\\\"]}\" > Import.json.
Note that the import API updates the resource if it already exists, and creates a new resource if it does not. To import to a different compartment, edit and change the compartmentId to the desired compartment OCID.
Here is an example of how you can use CLI to do import:
oci management-dashboard dashboard import --from-json file://Import.json
:param oci.management_dashboard.models.ManagementDashboardImportDetails management_dashboard_import_details: (required)
JSON metadata for importing dashboards and their saved searches.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call
for a resource, set the `if-match` parameter to the value of the
etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you
provide matches the resource's current etag value.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or
server error without risk of executing that same action again. Retry tokens expire after 24
hours, but can be invalidated before then due to conflicting operations. For example, if a resource
has been deleted and purged from the system, then a retry of the original creation request
might be rejected.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/managementdashboard/import_dashboard.py.html>`__ to see an example of how to use import_dashboard API.
"""
resource_path = "/managementDashboards/actions/importDashboard"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_retry_token",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"import_dashboard got unknown kwargs: {!r}".format(extra_kwargs))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
header_params=header_params,
body=management_dashboard_import_details)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
header_params=header_params,
body=management_dashboard_import_details)
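# Hedged usage sketch (kept as comments so this generated module is unchanged
# at import time): one way a caller might invoke import_dashboard with an
# idempotency token and the SDK's default retry strategy. The client
# construction, config path, and file name are illustrative assumptions.
#
#   import json
#   import oci
#
#   config = oci.config.from_file()    # assumes ~/.oci/config exists
#   client = oci.management_dashboard.DashxApisClient(config)
#   with open("Import.json") as f:     # file produced by the export API
#       details = json.load(f)
#   response = client.import_dashboard(
#       details,
#       opc_retry_token="import-dashboards-001",  # makes retries idempotent
#       retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY)
#   assert response.data is None       # this operation returns no body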
def list_management_dashboards(self, compartment_id, **kwargs):
"""
Gets the list of dashboards in a compartment, with pagination. Only summary properties are returned.
:param str compartment_id: (required)
The ID of the compartment in which to list resources.
:param str display_name: (optional)
A filter to return only resources that match the entire display name given.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param int limit: (optional)
The maximum number of items to return.
:param str page: (optional)
The page token representing the page on which to start retrieving results. This is usually retrieved from a previous list call.
:param str sort_order: (optional)
The sort order to use, either 'asc' or 'desc'.
Allowed values are: "ASC", "DESC"
:param str sort_by: (optional)
The field to sort by. Only one sort order may be provided. Default order for timeCreated is descending. Default order for displayName is ascending. If no value is specified, timeCreated is the default.
Allowed values are: "timeCreated", "displayName"
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.management_dashboard.models.ManagementDashboardCollection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/managementdashboard/list_management_dashboards.py.html>`__ to see an example of how to use list_management_dashboards API.
"""
resource_path = "/managementDashboards"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"display_name",
"opc_request_id",
"limit",
"page",
"sort_order",
"sort_by"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_management_dashboards got unknown kwargs: {!r}".format(extra_kwargs))
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["timeCreated", "displayName"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
query_params = {
"compartmentId": compartment_id,
"displayName": kwargs.get("display_name", missing),
"limit": kwargs.get("limit", missing),
"page": kwargs.get("page", missing),
"sortOrder": kwargs.get("sort_order", missing),
"sortBy": kwargs.get("sort_by", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="ManagementDashboardCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="ManagementDashboardCollection")
def list_management_saved_searches(self, compartment_id, **kwargs):
"""
Gets the list of saved searches in a compartment, with pagination. Only summary properties are returned.
:param str compartment_id: (required)
The ID of the compartment in which to list resources.
:param str display_name: (optional)
A filter to return only resources that match the entire display name given.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param int limit: (optional)
The maximum number of items to return.
:param str page: (optional)
The page token representing the page on which to start retrieving results. This is usually retrieved from a previous list call.
:param str sort_order: (optional)
The sort order to use, either 'asc' or 'desc'.
Allowed values are: "ASC", "DESC"
:param str sort_by: (optional)
The field to sort by. Only one sort order may be provided. Default order for timeCreated is descending. Default order for displayName is ascending. If no value is specified, timeCreated is the default.
Allowed values are: "timeCreated", "displayName"
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.management_dashboard.models.ManagementSavedSearchCollection`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/managementdashboard/list_management_saved_searches.py.html>`__ to see an example of how to use list_management_saved_searches API.
"""
resource_path = "/managementSavedSearches"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"display_name",
"opc_request_id",
"limit",
"page",
"sort_order",
"sort_by"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_management_saved_searches got unknown kwargs: {!r}".format(extra_kwargs))
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["timeCreated", "displayName"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
query_params = {
"compartmentId": compartment_id,
"displayName": kwargs.get("display_name", missing),
"limit": kwargs.get("limit", missing),
"page": kwargs.get("page", missing),
"sortOrder": kwargs.get("sort_order", missing),
"sortBy": kwargs.get("sort_by", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="ManagementSavedSearchCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="ManagementSavedSearchCollection")
def update_management_dashboard(self, management_dashboard_id, update_management_dashboard_details, **kwargs):
"""
Updates an existing dashboard identified by the ID path parameter. CompartmentId can be modified only by the changeCompartment API. The maximum number of saved searches in a dashboard is 20.
:param str management_dashboard_id: (required)
A unique dashboard identifier.
:param oci.management_dashboard.models.UpdateManagementDashboardDetails update_management_dashboard_details: (required)
JSON metadata for changed dashboard properties.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call
for a resource, set the `if-match` parameter to the value of the
etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you
provide matches the resource's current etag value.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or
server error without risk of executing that same action again. Retry tokens expire after 24
hours, but can be invalidated before then due to conflicting operations. For example, if a resource
has been deleted and purged from the system, then a retry of the original creation request
might be rejected.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.management_dashboard.models.ManagementDashboard`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/managementdashboard/update_management_dashboard.py.html>`__ to see an example of how to use update_management_dashboard API.
"""
resource_path = "/managementDashboards/{managementDashboardId}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_retry_token",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_management_dashboard got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"managementDashboardId": management_dashboard_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_management_dashboard_details,
response_type="ManagementDashboard")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_management_dashboard_details,
response_type="ManagementDashboard")
def update_management_saved_search(self, management_saved_search_id, update_management_saved_search_details, **kwargs):
"""
Updates an existing saved search identified by the ID path parameter. CompartmentId can be modified only by the changeCompartment API.
:param str management_saved_search_id: (required)
A unique saved search identifier.
:param oci.management_dashboard.models.UpdateManagementSavedSearchDetails update_management_saved_search_details: (required)
JSON metadata for changed saved search properties.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call
for a resource, set the `if-match` parameter to the value of the
etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you
provide matches the resource's current etag value.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or
server error without risk of executing that same action again. Retry tokens expire after 24
hours, but can be invalidated before then due to conflicting operations. For example, if a resource
has been deleted and purged from the system, then a retry of the original creation request
might be rejected.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.management_dashboard.models.ManagementSavedSearch`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/managementdashboard/update_management_saved_search.py.html>`__ to see an example of how to use update_management_saved_search API.
"""
resource_path = "/managementSavedSearches/{managementSavedSearchId}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_retry_token",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_management_saved_search got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"managementSavedSearchId": management_saved_search_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_management_saved_search_details,
response_type="ManagementSavedSearch")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_management_saved_search_details,
response_type="ManagementSavedSearch")
# ---------------------------------------------------------------------------
# Dataset row boundary: per-row quality metrics elided.
# Next row: infoblox_netmri/api/broker/v3_6_0/vrf_route_target_broker.py
# (repo IngmarVG-IB/infoblox-netmri, license Apache-2.0, 47,962 bytes,
# hexsha 5331c984f6aa0fc9ce1af62fe5097160f6c24e1f)
# ---------------------------------------------------------------------------
from ..broker import Broker
class VrfRouteTargetBroker(Broker):
controller = "vrf_route_targets"
def show(self, **kwargs):
"""Shows the details for the specified vrf route target.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param VrfRouteTargetID: The internal NetMRI identifier for this VRF route target.
:type VrfRouteTargetID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of vrf route target methods. The listed methods will be called on each vrf route target returned and included in the output. Available methods are: data_source, device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device.
:type include: Array of String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return vrf_route_target: The vrf route target identified by the specified VrfRouteTargetID.
:rtype vrf_route_target: VrfRouteTarget
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
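# Hedged usage sketch (commented out): fetching a single VRF route target and
# including its associated device. The client constructor shown is an
# assumption based on the infoblox-netmri package; verify the signature
# against the version you use.
#
#   from infoblox_netmri.client import InfobloxNetMRI
#   c = InfobloxNetMRI("netmri.example.com", "admin", "password")
#   broker = c.get_broker("VrfRouteTarget")
#   rt = broker.show(VrfRouteTargetID=42, include=["device"])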
def index(self, **kwargs):
"""Lists the available vrf route targets. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.10
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VirtualNetworkMemberID: The internal NetMRI identifier for the Virtual Network Member that corresponds to this VRF route target.
:type VirtualNetworkMemberID: Array of Integer
| ``api version min:`` 2.10
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VrfRouteTargetID: The internal NetMRI identifier for this VRF route target.
:type VrfRouteTargetID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the vrf route targets as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of vrf route target methods. The listed methods will be called on each vrf route target returned and included in the output. Available methods are: data_source, device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit parameter for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` VrfRouteTargetID
:param sort: The data field(s) to use for sorting the output. Default is VrfRouteTargetID. Valid values are VrfRouteTargetID, VrfRouteTargetStartTime, VrfRouteTargetEndTime, VrfRouteTargetChangedCols, VrfRouteTargetTimestamp, VrfRouteTargetFirstTime, DataSourceID, DeviceID, VirtualNetworkMemberID, VrfDirection, RTType, RTLeftSide, RTRightSide.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each VrfRouteTarget. Valid values are VrfRouteTargetID, VrfRouteTargetStartTime, VrfRouteTargetEndTime, VrfRouteTargetChangedCols, VrfRouteTargetTimestamp, VrfRouteTargetFirstTime, DataSourceID, DeviceID, VirtualNetworkMemberID, VrfDirection, RTType, RTLeftSide, RTRightSide. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return vrf_route_targets: An array of the VrfRouteTarget objects that match the specified input criteria.
:rtype vrf_route_targets: Array of VrfRouteTarget
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
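# Hedged usage sketch (commented out): paging through index() with
# start/limit, assuming `broker` was obtained as in the sketch above.
#
#   start, limit = 0, 1000
#   while True:
#       batch = broker.index(start=start, limit=limit,
#                            sort=["VrfRouteTargetID"])
#       for rt in batch:
#           print(rt.VrfRouteTargetID, rt.VrfDirection)
#       if len(batch) < limit:   # a short page means we reached the end
#           break
#       start += limit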
def search(self, **kwargs):
"""Lists the available vrf route targets matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.10
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Array of Integer
| ``api version min:`` 2.10
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device from which this data was collected.
:type DeviceID: Array of Integer
| ``api version min:`` 2.10
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param RTLeftSide: The left-hand portion of the route target; use RTType to identify whether it is an AS number or an IPv4 address.
:type RTLeftSide: Array of Integer
| ``api version min:`` 2.10
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param RTRightSide: The right-hand portion of the route target.
:type RTRightSide: Array of Integer
| ``api version min:`` 2.10
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param RTType: The style of the route target, asn or ipv4.
:type RTType: Array of String
| ``api version min:`` 2.10
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VirtualNetworkMemberID: The internal NetMRI identifier for the Virtual Network Member that corresponds to this VRF route target.
:type VirtualNetworkMemberID: Array of Integer
| ``api version min:`` 2.10
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VrfDirection: The direction of the RT (import or export).
:type VrfDirection: Array of String
| ``api version min:`` 2.10
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VrfRouteTargetChangedCols: The fields that changed between this revision of the record and the previous revision.
:type VrfRouteTargetChangedCols: Array of String
| ``api version min:`` 2.10
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VrfRouteTargetEndTime: The ending effective time of this record, or empty if still in effect.
:type VrfRouteTargetEndTime: Array of DateTime
| ``api version min:`` 2.10
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VrfRouteTargetFirstTime: The first time this data element was seen on the network.
:type VrfRouteTargetFirstTime: Array of DateTime
| ``api version min:`` 2.10
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VrfRouteTargetID: The internal NetMRI identifier for this VRF route target.
:type VrfRouteTargetID: Array of Integer
| ``api version min:`` 2.10
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VrfRouteTargetStartTime: The starting effective time of this record.
:type VrfRouteTargetStartTime: Array of DateTime
| ``api version min:`` 2.10
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VrfRouteTargetTimestamp: The date and time this record was collected or calculated.
:type VrfRouteTargetTimestamp: Array of DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the vrf route targets as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of vrf route target methods. The listed methods will be called on each vrf route target returned and included in the output. Available methods are: data_source, device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit parameter for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` VrfRouteTargetID
:param sort: The data field(s) to use for sorting the output. Default is VrfRouteTargetID. Valid values are VrfRouteTargetID, VrfRouteTargetStartTime, VrfRouteTargetEndTime, VrfRouteTargetChangedCols, VrfRouteTargetTimestamp, VrfRouteTargetFirstTime, DataSourceID, DeviceID, VirtualNetworkMemberID, VrfDirection, RTType, RTLeftSide, RTRightSide.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each VrfRouteTarget. Valid values are VrfRouteTargetID, VrfRouteTargetStartTime, VrfRouteTargetEndTime, VrfRouteTargetChangedCols, VrfRouteTargetTimestamp, VrfRouteTargetFirstTime, DataSourceID, DeviceID, VirtualNetworkMemberID, VrfDirection, RTType, RTLeftSide, RTRightSide. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against vrf route targets, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: DataSourceID, DeviceID, RTLeftSide, RTRightSide, RTType, VirtualNetworkMemberID, VrfDirection, VrfRouteTargetChangedCols, VrfRouteTargetEndTime, VrfRouteTargetFirstTime, VrfRouteTargetID, VrfRouteTargetStartTime, VrfRouteTargetTimestamp.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter can be costly and inefficient if not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return vrf_route_targets: An array of the VrfRouteTarget objects that match the specified input criteria.
:rtype vrf_route_targets: Array of VrfRouteTarget
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
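# Hedged usage sketch (commented out): field filtering versus the free-text
# query parameter. Wrapping the query value in '/' switches to a
# regular-expression match, per the docstring above.
#
#   exports = broker.search(VrfDirection=["export"])
#   asn_rts = broker.search(query="/^65000$/")   # regex match on any field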
def find(self, **kwargs):
"""Lists the available vrf route targets matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: DataSourceID, DeviceID, RTLeftSide, RTRightSide, RTType, VirtualNetworkMemberID, VrfDirection, VrfRouteTargetChangedCols, VrfRouteTargetEndTime, VrfRouteTargetFirstTime, VrfRouteTargetID, VrfRouteTargetStartTime, VrfRouteTargetTimestamp.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DataSourceID: The operator to apply to the field DataSourceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DataSourceID: If op_DataSourceID is specified, the field named in this input will be compared to the value in DataSourceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DataSourceID must be specified if op_DataSourceID is specified.
:type val_f_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DataSourceID: If op_DataSourceID is specified, this value will be compared to the value in DataSourceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DataSourceID must be specified if op_DataSourceID is specified.
:type val_c_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceID: The operator to apply to the field DeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceID: The internal NetMRI identifier for the device from which this data was collected. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceID: If op_DeviceID is specified, the field named in this input will be compared to the value in DeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceID must be specified if op_DeviceID is specified.
:type val_f_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceID: If op_DeviceID is specified, this value will be compared to the value in DeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceID must be specified if op_DeviceID is specified.
:type val_c_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_RTLeftSide: The operator to apply to the field RTLeftSide. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. RTLeftSide: The left-hand portion of the route target; use RTType to identify whether it is an AS number or an IPv4 address. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_RTLeftSide: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_RTLeftSide: If op_RTLeftSide is specified, the field named in this input will be compared to the value in RTLeftSide using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_RTLeftSide must be specified if op_RTLeftSide is specified.
:type val_f_RTLeftSide: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_RTLeftSide: If op_RTLeftSide is specified, this value will be compared to the value in RTLeftSide using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_RTLeftSide must be specified if op_RTLeftSide is specified.
:type val_c_RTLeftSide: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_RTRightSide: The operator to apply to the field RTRightSide. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. RTRightSide: The right-hand portion of the route target. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_RTRightSide: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_RTRightSide: If op_RTRightSide is specified, the field named in this input will be compared to the value in RTRightSide using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_RTRightSide must be specified if op_RTRightSide is specified.
:type val_f_RTRightSide: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_RTRightSide: If op_RTRightSide is specified, this value will be compared to the value in RTRightSide using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_RTRightSide must be specified if op_RTRightSide is specified.
:type val_c_RTRightSide: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_RTType: The operator to apply to the field RTType. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. RTType: The style of the route target, asn or ipv4. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_RTType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_RTType: If op_RTType is specified, the field named in this input will be compared to the value in RTType using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_RTType must be specified if op_RTType is specified.
:type val_f_RTType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_RTType: If op_RTType is specified, this value will be compared to the value in RTType using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_RTType must be specified if op_RTType is specified.
:type val_c_RTType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VirtualNetworkMemberID: The operator to apply to the field VirtualNetworkMemberID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VirtualNetworkMemberID: The internal NetMRI identifier for the Virtual Network Member that corresponds to this VRF route target. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VirtualNetworkMemberID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VirtualNetworkMemberID: If op_VirtualNetworkMemberID is specified, the field named in this input will be compared to the value in VirtualNetworkMemberID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VirtualNetworkMemberID must be specified if op_VirtualNetworkMemberID is specified.
:type val_f_VirtualNetworkMemberID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VirtualNetworkMemberID: If op_VirtualNetworkMemberID is specified, this value will be compared to the value in VirtualNetworkMemberID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VirtualNetworkMemberID must be specified if op_VirtualNetworkMemberID is specified.
:type val_c_VirtualNetworkMemberID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VrfDirection: The operator to apply to the field VrfDirection. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrfDirection: The direction of the RT (import or export). For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VrfDirection: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VrfDirection: If op_VrfDirection is specified, the field named in this input will be compared to the value in VrfDirection using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrfDirection must be specified if op_VrfDirection is specified.
:type val_f_VrfDirection: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VrfDirection: If op_VrfDirection is specified, this value will be compared to the value in VrfDirection using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrfDirection must be specified if op_VrfDirection is specified.
:type val_c_VrfDirection: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VrfRouteTargetChangedCols: The operator to apply to the field VrfRouteTargetChangedCols. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrfRouteTargetChangedCols: The fields that changed between this revision of the record and the previous revision. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VrfRouteTargetChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VrfRouteTargetChangedCols: If op_VrfRouteTargetChangedCols is specified, the field named in this input will be compared to the value in VrfRouteTargetChangedCols using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrfRouteTargetChangedCols must be specified if op_VrfRouteTargetChangedCols is specified.
:type val_f_VrfRouteTargetChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VrfRouteTargetChangedCols: If op_VrfRouteTargetChangedCols is specified, this value will be compared to the value in VrfRouteTargetChangedCols using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrfRouteTargetChangedCols must be specified if op_VrfRouteTargetChangedCols is specified.
:type val_c_VrfRouteTargetChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VrfRouteTargetEndTime: The operator to apply to the field VrfRouteTargetEndTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrfRouteTargetEndTime: The ending effective time of this record, or empty if still in effect. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VrfRouteTargetEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VrfRouteTargetEndTime: If op_VrfRouteTargetEndTime is specified, the field named in this input will be compared to the value in VrfRouteTargetEndTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrfRouteTargetEndTime must be specified if op_VrfRouteTargetEndTime is specified.
:type val_f_VrfRouteTargetEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VrfRouteTargetEndTime: If op_VrfRouteTargetEndTime is specified, this value will be compared to the value in VrfRouteTargetEndTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrfRouteTargetEndTime must be specified if op_VrfRouteTargetEndTime is specified.
:type val_c_VrfRouteTargetEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VrfRouteTargetFirstTime: The operator to apply to the field VrfRouteTargetFirstTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrfRouteTargetFirstTime: The first time this data element was seen on the network. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VrfRouteTargetFirstTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VrfRouteTargetFirstTime: If op_VrfRouteTargetFirstTime is specified, the field named in this input will be compared to the value in VrfRouteTargetFirstTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrfRouteTargetFirstTime must be specified if op_VrfRouteTargetFirstTime is specified.
:type val_f_VrfRouteTargetFirstTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VrfRouteTargetFirstTime: If op_VrfRouteTargetFirstTime is specified, this value will be compared to the value in VrfRouteTargetFirstTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrfRouteTargetFirstTime must be specified if op_VrfRouteTargetFirstTime is specified.
:type val_c_VrfRouteTargetFirstTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VrfRouteTargetID: The operator to apply to the field VrfRouteTargetID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrfRouteTargetID: The internal NetMRI identifier for this VRF route target. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VrfRouteTargetID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VrfRouteTargetID: If op_VrfRouteTargetID is specified, the field named in this input will be compared to the value in VrfRouteTargetID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrfRouteTargetID must be specified if op_VrfRouteTargetID is specified.
:type val_f_VrfRouteTargetID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VrfRouteTargetID: If op_VrfRouteTargetID is specified, this value will be compared to the value in VrfRouteTargetID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrfRouteTargetID must be specified if op_VrfRouteTargetID is specified.
:type val_c_VrfRouteTargetID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VrfRouteTargetStartTime: The operator to apply to the field VrfRouteTargetStartTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrfRouteTargetStartTime: The starting effective time of this record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VrfRouteTargetStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VrfRouteTargetStartTime: If op_VrfRouteTargetStartTime is specified, the field named in this input will be compared to the value in VrfRouteTargetStartTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrfRouteTargetStartTime must be specified if op_VrfRouteTargetStartTime is specified.
:type val_f_VrfRouteTargetStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VrfRouteTargetStartTime: If op_VrfRouteTargetStartTime is specified, this value will be compared to the value in VrfRouteTargetStartTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrfRouteTargetStartTime must be specified if op_VrfRouteTargetStartTime is specified.
:type val_c_VrfRouteTargetStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VrfRouteTargetTimestamp: The operator to apply to the field VrfRouteTargetTimestamp. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VrfRouteTargetTimestamp: The date and time this record was collected or calculated. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VrfRouteTargetTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VrfRouteTargetTimestamp: If op_VrfRouteTargetTimestamp is specified, the field named in this input will be compared to the value in VrfRouteTargetTimestamp using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VrfRouteTargetTimestamp must be specified if op_VrfRouteTargetTimestamp is specified.
:type val_f_VrfRouteTargetTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VrfRouteTargetTimestamp: If op_VrfRouteTargetTimestamp is specified, this value will be compared to the value in VrfRouteTargetTimestamp using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VrfRouteTargetTimestamp must be specified if op_VrfRouteTargetTimestamp is specified.
:type val_c_VrfRouteTargetTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the vrf route targets as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of vrf route target methods. The listed methods will be called on each vrf route target returned and included in the output. Available methods are: data_source, device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit parameter for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` VrfRouteTargetID
:param sort: The data field(s) to use for sorting the output. Default is VrfRouteTargetID. Valid values are VrfRouteTargetID, VrfRouteTargetStartTime, VrfRouteTargetEndTime, VrfRouteTargetChangedCols, VrfRouteTargetTimestamp, VrfRouteTargetFirstTime, DataSourceID, DeviceID, VirtualNetworkMemberID, VrfDirection, RTType, RTLeftSide, RTRightSide.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each VrfRouteTarget. Valid values are VrfRouteTargetID, VrfRouteTargetStartTime, VrfRouteTargetEndTime, VrfRouteTargetChangedCols, VrfRouteTargetTimestamp, VrfRouteTargetFirstTime, DataSourceID, DeviceID, VirtualNetworkMemberID, VrfDirection, RTType, RTLeftSide, RTRightSide. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used to locate the row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used to locate the row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if it is not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return vrf_route_targets: An array of the VrfRouteTarget objects that match the specified input criteria.
:rtype vrf_route_targets: Array of VrfRouteTarget
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
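# Illustrative sketch (an addition, not from the original source): calling find()
# with the pagination and sorting parameters documented above. `broker` stands in
# for an instance of this broker class obtained from an authenticated NetMRI
# client -- an assumed name for the example.
#
#     page = broker.find(start=0, limit=500,
#                        sort=["VrfRouteTargetTimestamp"], dir=["desc"],
#                        select=["VrfRouteTargetID", "DeviceID", "RTType"])
#     # With 100 matching records, limit=10 and start=10 would return records 10-19.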
| 57.30227
| 652
| 0.630541
| 5,759
| 47,962
| 5.199514
| 0.051919
| 0.064788
| 0.042112
| 0.055069
| 0.934812
| 0.933008
| 0.900982
| 0.888826
| 0.883683
| 0.877972
| 0
| 0.003771
| 0.292273
| 47,962
| 837
| 653
| 57.30227
| 0.878388
| 0.83587
| 0
| 0
| 0
| 0
| 0.050279
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.363636
| false
| 0
| 0.090909
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 9
|
5348eaba3a38047a74497539c60ddcad993be72a
| 7,605
|
py
|
Python
|
tests/test_assigner.py
|
jiangwenj02/SOLO
|
f0a1de652028236d7935274f51c509008903ad7b
|
[
"BSD-2-Clause"
] | 1,467
|
2020-03-24T01:38:24.000Z
|
2022-03-31T03:02:05.000Z
|
tests/test_assigner.py
|
jiangwenj02/SOLO
|
f0a1de652028236d7935274f51c509008903ad7b
|
[
"BSD-2-Clause"
] | 208
|
2020-03-26T16:24:23.000Z
|
2022-03-30T13:12:07.000Z
|
tests/test_assigner.py
|
jiangwenj02/SOLO
|
f0a1de652028236d7935274f51c509008903ad7b
|
[
"BSD-2-Clause"
] | 300
|
2020-03-24T03:55:02.000Z
|
2022-03-29T19:08:07.000Z
|
"""
Tests the Assigner objects.
CommandLine:
pytest tests/test_assigner.py
xdoctest tests/test_assigner.py zero
"""
import torch
from mmdet.core import MaxIoUAssigner
from mmdet.core.bbox.assigners import ApproxMaxIoUAssigner, PointAssigner
def test_max_iou_assigner():
self = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_labels = torch.LongTensor([2, 3])
assign_result = self.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
assert len(assign_result.gt_inds) == 4
assert len(assign_result.labels) == 4
expected_gt_inds = torch.LongTensor([1, 0, 2, 0])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
def test_max_iou_assigner_with_ignore():
self = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
ignore_iof_thr=0.5,
ignore_wrt_candidates=False,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_bboxes_ignore = torch.Tensor([
[30, 30, 40, 40],
])
assign_result = self.assign(
bboxes, gt_bboxes, gt_bboxes_ignore=gt_bboxes_ignore)
expected_gt_inds = torch.LongTensor([1, 0, 2, -1])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
def test_max_iou_assigner_with_empty_gt():
"""
Test corner case where an image might have no true detections
"""
self = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.FloatTensor([])
assign_result = self.assign(bboxes, gt_bboxes)
expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
def test_max_iou_assigner_with_empty_boxes():
"""
Test corner case where a network might predict no boxes
"""
self = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.empty((0, 4))
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_labels = torch.LongTensor([2, 3])
# Test with gt_labels
assign_result = self.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
assert len(assign_result.gt_inds) == 0
assert tuple(assign_result.labels.shape) == (0, )
# Test without gt_labels
assign_result = self.assign(bboxes, gt_bboxes, gt_labels=None)
assert len(assign_result.gt_inds) == 0
assert assign_result.labels is None
def test_max_iou_assigner_with_empty_boxes_and_gt():
"""
Test corner case where a network might predict no boxes and no gt
"""
self = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.empty((0, 4))
gt_bboxes = torch.empty((0, 4))
assign_result = self.assign(bboxes, gt_bboxes)
assert len(assign_result.gt_inds) == 0
def test_point_assigner():
self = PointAssigner()
points = torch.FloatTensor([ # [x, y, stride]
[0, 0, 1],
[10, 10, 1],
[5, 5, 1],
[32, 32, 1],
])
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
assign_result = self.assign(points, gt_bboxes)
expected_gt_inds = torch.LongTensor([1, 2, 1, 0])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
def test_point_assigner_with_empty_gt():
"""
Test corner case where an image might have no true detections
"""
self = PointAssigner()
points = torch.FloatTensor([ # [x, y, stride]
[0, 0, 1],
[10, 10, 1],
[5, 5, 1],
[32, 32, 1],
])
gt_bboxes = torch.FloatTensor([])
assign_result = self.assign(points, gt_bboxes)
expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
def test_point_assigner_with_empty_boxes_and_gt():
"""
Test corner case where a network might predict no points and no gt
"""
self = PointAssigner()
points = torch.FloatTensor([])
gt_bboxes = torch.FloatTensor([])
assign_result = self.assign(points, gt_bboxes)
assert len(assign_result.gt_inds) == 0
def test_approx_iou_assigner():
self = ApproxMaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
approxs_per_octave = 1
approxs = bboxes
squares = bboxes
assign_result = self.assign(approxs, squares, approxs_per_octave,
gt_bboxes)
expected_gt_inds = torch.LongTensor([1, 0, 2, 0])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
def test_approx_iou_assigner_with_empty_gt():
"""
Test corner case where an image might have no true detections
"""
self = ApproxMaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.FloatTensor([])
approxs_per_octave = 1
approxs = bboxes
squares = bboxes
assign_result = self.assign(approxs, squares, approxs_per_octave,
gt_bboxes)
expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
def test_approx_iou_assigner_with_empty_boxes():
"""
Test corner case where a network might predict no boxes
"""
self = ApproxMaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.empty((0, 4))
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
approxs_per_octave = 1
approxs = bboxes
squares = bboxes
assign_result = self.assign(approxs, squares, approxs_per_octave,
gt_bboxes)
assert len(assign_result.gt_inds) == 0
def test_approx_iou_assigner_with_empty_boxes_and_gt():
"""
Test corner case where a network might predict no boxes and no gt
"""
self = ApproxMaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.empty((0, 4))
gt_bboxes = torch.empty((0, 4))
approxs_per_octave = 1
approxs = bboxes
squares = bboxes
assign_result = self.assign(approxs, squares, approxs_per_octave,
gt_bboxes)
assert len(assign_result.gt_inds) == 0
def test_random_assign_result():
"""
Test random instantiation of assign result to catch corner cases
"""
from mmdet.core.bbox.assigners.assign_result import AssignResult
AssignResult.random()
AssignResult.random(num_gts=0, num_preds=0)
AssignResult.random(num_gts=0, num_preds=3)
AssignResult.random(num_gts=3, num_preds=3)
AssignResult.random(num_gts=0, num_preds=3)
AssignResult.random(num_gts=7, num_preds=7)
AssignResult.random(num_gts=7, num_preds=64)
AssignResult.random(num_gts=24, num_preds=3)
| 27.356115
| 73
| 0.614333
| 1,057
| 7,605
| 4.1807
| 0.099338
| 0.089613
| 0.021498
| 0.032587
| 0.869201
| 0.843856
| 0.843856
| 0.817153
| 0.782304
| 0.782304
| 0
| 0.064879
| 0.2643
| 7,605
| 277
| 74
| 27.454874
| 0.724933
| 0.099277
| 0
| 0.772277
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084158
| 1
| 0.064356
| false
| 0
| 0.019802
| 0
| 0.084158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
534cc557abcd61cf61425be063a46c469359f038
| 6,737
|
py
|
Python
|
tests.py
|
erigones/api_ipf
|
2f75bce7b15a409b38c9a8ed32a0d7af27c589f6
|
[
"BSD-3-Clause"
] | null | null | null |
tests.py
|
erigones/api_ipf
|
2f75bce7b15a409b38c9a8ed32a0d7af27c589f6
|
[
"BSD-3-Clause"
] | null | null | null |
tests.py
|
erigones/api_ipf
|
2f75bce7b15a409b38c9a8ed32a0d7af27c589f6
|
[
"BSD-3-Clause"
] | null | null | null |
from django.test import TestCase
from rest_framework import status
from eszone_ipf.settings import BASE_DIR, API_VERSION_PREFIX
class ConfigFileTestCase(TestCase):
url = '/{}/api_ipf/config/'.format(API_VERSION_PREFIX)
url_act = ''.join([url, 'activate/'])
def test_ipf_form_post(self):
title = 'test_ipf.conf'
test_file = ''.join([BASE_DIR, title])
with open(test_file, 'w+') as f:
f.write('# Test file.')
f.seek(0)
files={'title': (title, ''),
'form': ('ipf', ''),
'directory': (title, f.read())}
response = self.client.post(self.url, files=files)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_ipnat_form_post(self):
title = 'test_ipnat.conf'
test_file = ''.join([BASE_DIR, title])
with open(test_file, 'w+') as f:
f.write('# Test file.')
f.seek(0)
files={'title': (title, ''),
'form': ('ipnat', ''),
'directory': (title, f.read())}
response = self.client.post(self.url, files=files)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_ippool_form_post(self):
title = 'test_ippool.conf'
test_file = ''.join([BASE_DIR, title])
with open(test_file, 'w+') as f:
f.write('# Test file.')
f.seek(0)
files={'title': (title, ''),
'form': ('ippool', ''),
'directory': (title, f.read())}
response = self.client.post(self.url, files=files)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_ipf6_form_post(self):
title = 'test_ipf6.conf'
test_file = ''.join([BASE_DIR, title])
with open(test_file, 'w+') as f:
f.write('# Test file.')
f.seek(0)
files={'title': (title, ''),
'form': ('ipf6', ''),
'directory': (title, f.read())}
response = self.client.post(self.url, files=files)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_bad_form_post(self):
title = 'test_ipf.conf'
test_file = ''.join([BASE_DIR, title])
with open(test_file, 'w+') as f:
f.write('# Test file.')
f.seek(0)
files={'title': (title, ''),
'form': ('wrong', ''),
'directory': (title, f.read())}
response = self.client.post(self.url, files=files)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_missing_arg_post(self):
files={'title': ('wrong', ''),
'form': ('wrong', '')}
response = self.client.post(self.url, files=files)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_get_conf_list(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_get_conf_file(self):
response = self.client.get(''.join([self.url, 'test_ipf.conf/']))
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_get_not_existing_conf_file(self):
response = self.client.get(''.join([self.url, 'no_test.conf/']))
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_update_conf_file(self):
title = 'test_ipf.conf'
test_file = ''.join([BASE_DIR, title])
with open(test_file, 'w+') as f:
f.write('# Test file.')
f.seek(0)
files={'title': (title, ''),
'directory': (title, f.read())}
response = self.client.put(''.join([self.url, 'test_ipf.conf/']),
files=files)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_delete_conf_file(self):
response = self.client.delete(''.join([self.url, 'test_ipf.conf/']))
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_activate_ipf_form(self):
response = self.client.get(''.join([self.url_act, 'test_ipf.conf/']))
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_activate_ipnat_form(self):
response = self.client.get(''.join([self.url_act, 'test_ipnat.conf/']))
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_activate_ippool_form(self):
response = self.client.get(''.join([self.url_act, 'test_ippool.conf/']))
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_activate_ipf6_form(self):
response = self.client.get(''.join([self.url_act, 'test_ipf6.conf/']))
self.assertEqual(response.status_code, status.HTTP_200_OK)
class LogFileTestCase(TestCase):
url = '/{}/api_ipf/log/'.format(API_VERSION_PREFIX)
title = 'test.log'
def test_post(self):
response = self.client.post(self.url, data={'title': self.title})
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_missing_arg_post(self):
response = self.client.post(self.url, data={})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_get_log_list(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_get_log_file(self):
response = self.client.get(''.join([self.url, self.title, '/']))
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_get_not_existing_log_file(self):
response = self.client.get(''.join([self.url, 'no_test.log/']))
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_delete_log_file(self):
response = self.client.delete(''.join([self.url, self.title, '/']))
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
class OtherTestCase(TestCase):
url = '/{}/api_ipf/'.format(API_VERSION_PREFIX)
def test_blacklist_update(self):
response = self.client.get(''.join([self.url, 'update/']))
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_allowed_command(self):
response = self.client.get(''.join([self.url, 'command/ipfstat -io/']))
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_not_allowed_command(self):
response = self.client.get(''.join([self.url, 'command/pkill python/']))
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
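# A likely way to run this suite (a sketch, assuming a standard Django project
# with the api_ipf app installed and the eszone_ipf settings module on the
# path -- both inferred from the imports above, not stated in this file):
#
#     python manage.py test api_ipf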
| 39.863905
| 80
| 0.612884
| 848
| 6,737
| 4.628538
| 0.097877
| 0.042803
| 0.110064
| 0.177325
| 0.855541
| 0.83949
| 0.824968
| 0.815541
| 0.80535
| 0.76586
| 0
| 0.01617
| 0.238088
| 6,737
| 169
| 81
| 39.863905
| 0.74849
| 0
| 0
| 0.548872
| 0
| 0
| 0.083408
| 0
| 0
| 0
| 0
| 0
| 0.180451
| 1
| 0.180451
| false
| 0
| 0.022556
| 0
| 0.263158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5350057b6e3650df35a2812cedb998c429c37191
| 139
|
py
|
Python
|
nimoy/runner/metadata.py
|
browncoat-ninjas/nimoy
|
ff46fd6169c57af2177c0649a3d4c45340e61d3b
|
[
"Apache-2.0"
] | 92
|
2017-09-15T17:35:25.000Z
|
2022-03-24T08:38:02.000Z
|
nimoy/runner/metadata.py
|
Luftzig/nimoy
|
cdc49332674f9ccbfcd1a0ac6bf62625eadcc16d
|
[
"Apache-2.0"
] | 16
|
2017-12-07T05:36:09.000Z
|
2022-02-04T07:40:20.000Z
|
nimoy/runner/metadata.py
|
Luftzig/nimoy
|
cdc49332674f9ccbfcd1a0ac6bf62625eadcc16d
|
[
"Apache-2.0"
] | 9
|
2017-12-17T19:32:56.000Z
|
2020-04-04T14:15:13.000Z
|
class RunnerContext:
def __init__(self, use_power_assertions: bool = False):
self.use_power_assertions = use_power_assertions
| 27.8
| 59
| 0.76259
| 17
| 139
| 5.647059
| 0.588235
| 0.25
| 0.5625
| 0.458333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172662
| 139
| 4
| 60
| 34.75
| 0.834783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.666667
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
5350aea9d25afc659c7e10955a2694f4dbcbc3f3
| 312,390
|
py
|
Python
|
huaweicloud-sdk-iam/huaweicloudsdkiam/v3/iam_async_client.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 64
|
2020-06-12T07:05:07.000Z
|
2022-03-30T03:32:50.000Z
|
huaweicloud-sdk-iam/huaweicloudsdkiam/v3/iam_async_client.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 11
|
2020-07-06T07:56:54.000Z
|
2022-01-11T11:14:40.000Z
|
huaweicloud-sdk-iam/huaweicloudsdkiam/v3/iam_async_client.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 24
|
2020-06-08T11:42:13.000Z
|
2022-03-04T06:44:08.000Z
|
# coding: utf-8
from __future__ import absolute_import
import datetime
import re
import importlib
import six
from huaweicloudsdkcore.client import Client, ClientBuilder
from huaweicloudsdkcore.exceptions import exceptions
from huaweicloudsdkcore.utils import http_utils
from huaweicloudsdkcore.sdk_stream_request import SdkStreamRequest
class IamAsyncClient(Client):
"""
:param configuration: Configuration object for this client
:param pool_threads: The number of threads to use for async requests
to the API. More threads means more concurrent API requests.
"""
PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types
NATIVE_TYPES_MAPPING = {
'int': int,
'long': int if six.PY3 else long,
'float': float,
'str': str,
'bool': bool,
'date': datetime.date,
'datetime': datetime.datetime,
'object': object,
}
def __init__(self):
super(IamAsyncClient, self).__init__()
self.model_package = importlib.import_module("huaweicloudsdkiam.v3.model")
self.preset_headers = {'User-Agent': 'HuaweiCloud-SDK-Python'}
@classmethod
def new_builder(cls, clazz=None):
if clazz is None:
return ClientBuilder(cls, "GlobalCredentials,BasicCredentials")
if clazz.__name__ != "IamClient":
raise TypeError("client type error, supported client type is IamClient")
return ClientBuilder(clazz, "GlobalCredentials,BasicCredentials")
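# Illustrative sketch (an addition, not from the original source) of building the
# async client via new_builder() above. GlobalCredentials and the with_* chain
# come from huaweicloudsdkcore; the endpoint URL is an assumed placeholder.
#
#     from huaweicloudsdkcore.auth.credentials import GlobalCredentials
#
#     credentials = GlobalCredentials(ak="<AK>", sk="<SK>", domain_id="<domain_id>")
#     client = IamAsyncClient.new_builder() \
#         .with_credentials(credentials) \
#         .with_endpoint("https://iam.myhuaweicloud.com") \
#         .build()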
def associate_agency_with_all_projects_permission_async(self, request):
"""为委托授予所有项目服务权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)为委托授予所有项目服务权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param AssociateAgencyWithAllProjectsPermissionRequest request
:return: AssociateAgencyWithAllProjectsPermissionResponse
"""
return self.associate_agency_with_all_projects_permission_with_http_info(request)
def associate_agency_with_all_projects_permission_with_http_info(self, request):
"""为委托授予所有项目服务权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)为委托授予所有项目服务权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param AssociateAgencyWithAllProjectsPermissionRequest request
:return: AssociateAgencyWithAllProjectsPermissionResponse
"""
all_params = ['agency_id', 'domain_id', 'role_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'agency_id' in local_var_params:
path_params['agency_id'] = local_var_params['agency_id']
if 'domain_id' in local_var_params:
path_params['domain_id'] = local_var_params['domain_id']
if 'role_id' in local_var_params:
path_params['role_id'] = local_var_params['role_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-INHERIT/domains/{domain_id}/agencies/{agency_id}/roles/{role_id}/inherited_to_projects',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='AssociateAgencyWithAllProjectsPermissionResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def associate_agency_with_domain_permission_async(self, request):
"""为委托授予全局服务权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)为委托授予全局服务权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param AssociateAgencyWithDomainPermissionRequest request
:return: AssociateAgencyWithDomainPermissionResponse
"""
return self.associate_agency_with_domain_permission_with_http_info(request)
def associate_agency_with_domain_permission_with_http_info(self, request):
"""为委托授予全局服务权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)为委托授予全局服务权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param AssociateAgencyWithDomainPermissionRequest request
:return: AssociateAgencyWithDomainPermissionResponse
"""
all_params = ['domain_id', 'agency_id', 'role_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'domain_id' in local_var_params:
path_params['domain_id'] = local_var_params['domain_id']
if 'agency_id' in local_var_params:
path_params['agency_id'] = local_var_params['agency_id']
if 'role_id' in local_var_params:
path_params['role_id'] = local_var_params['role_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-AGENCY/domains/{domain_id}/agencies/{agency_id}/roles/{role_id}',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='AssociateAgencyWithDomainPermissionResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def associate_agency_with_project_permission_async(self, request):
"""为委托授予项目服务权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)为委托授予项目服务权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param AssociateAgencyWithProjectPermissionRequest request
:return: AssociateAgencyWithProjectPermissionResponse
"""
return self.associate_agency_with_project_permission_with_http_info(request)
def associate_agency_with_project_permission_with_http_info(self, request):
"""为委托授予项目服务权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)为委托授予项目服务权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param AssociateAgencyWithProjectPermissionRequest request
:return: AssociateAgencyWithProjectPermissionResponse
"""
all_params = ['project_id', 'agency_id', 'role_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'project_id' in local_var_params:
path_params['project_id'] = local_var_params['project_id']
if 'agency_id' in local_var_params:
path_params['agency_id'] = local_var_params['agency_id']
if 'role_id' in local_var_params:
path_params['role_id'] = local_var_params['role_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-AGENCY/projects/{project_id}/agencies/{agency_id}/roles/{role_id}',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='AssociateAgencyWithProjectPermissionResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def check_all_projects_permission_for_agency_async(self, request):
"""检查委托下是否具有所有项目服务权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)检查委托是否具有所有项目服务权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param CheckAllProjectsPermissionForAgencyRequest request
:return: CheckAllProjectsPermissionForAgencyResponse
"""
return self.check_all_projects_permission_for_agency_with_http_info(request)
def check_all_projects_permission_for_agency_with_http_info(self, request):
"""检查委托下是否具有所有项目服务权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)检查委托是否具有所有项目服务权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param CheckAllProjectsPermissionForAgencyRequest request
:return: CheckAllProjectsPermissionForAgencyResponse
"""
all_params = ['agency_id', 'domain_id', 'role_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'agency_id' in local_var_params:
path_params['agency_id'] = local_var_params['agency_id']
if 'domain_id' in local_var_params:
path_params['domain_id'] = local_var_params['domain_id']
if 'role_id' in local_var_params:
path_params['role_id'] = local_var_params['role_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-INHERIT/domains/{domain_id}/agencies/{agency_id}/roles/{role_id}/inherited_to_projects',
method='HEAD',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='CheckAllProjectsPermissionForAgencyResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def check_domain_permission_for_agency_async(self, request):
"""查询委托是否拥有全局服务权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询委托是否拥有全局服务权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param CheckDomainPermissionForAgencyRequest request
:return: CheckDomainPermissionForAgencyResponse
"""
return self.check_domain_permission_for_agency_with_http_info(request)
def check_domain_permission_for_agency_with_http_info(self, request):
"""查询委托是否拥有全局服务权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询委托是否拥有全局服务权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param CheckDomainPermissionForAgencyRequest request
:return: CheckDomainPermissionForAgencyResponse
"""
all_params = ['domain_id', 'agency_id', 'role_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'domain_id' in local_var_params:
path_params['domain_id'] = local_var_params['domain_id']
if 'agency_id' in local_var_params:
path_params['agency_id'] = local_var_params['agency_id']
if 'role_id' in local_var_params:
path_params['role_id'] = local_var_params['role_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-AGENCY/domains/{domain_id}/agencies/{agency_id}/roles/{role_id}',
method='HEAD',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='CheckDomainPermissionForAgencyResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def check_project_permission_for_agency_async(self, request):
"""查询委托是否拥有项目服务权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询委托是否拥有项目服务权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param CheckProjectPermissionForAgencyRequest request
:return: CheckProjectPermissionForAgencyResponse
"""
return self.check_project_permission_for_agency_with_http_info(request)
def check_project_permission_for_agency_with_http_info(self, request):
"""查询委托是否拥有项目服务权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询委托是否拥有项目服务权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param CheckProjectPermissionForAgencyRequest request
:return: CheckProjectPermissionForAgencyResponse
"""
all_params = ['project_id', 'agency_id', 'role_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'project_id' in local_var_params:
path_params['project_id'] = local_var_params['project_id']
if 'agency_id' in local_var_params:
path_params['agency_id'] = local_var_params['agency_id']
if 'role_id' in local_var_params:
path_params['role_id'] = local_var_params['role_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-AGENCY/projects/{project_id}/agencies/{agency_id}/roles/{role_id}',
method='HEAD',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='CheckProjectPermissionForAgencyResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def create_agency_async(self, request):
"""创建委托
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)创建委托。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param CreateAgencyRequest request
:return: CreateAgencyResponse
"""
return self.create_agency_with_http_info(request)
def create_agency_with_http_info(self, request):
"""创建委托
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)创建委托。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param CreateAgencyRequest request
:return: CreateAgencyResponse
"""
all_params = ['create_agency_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-AGENCY/agencies',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='CreateAgencyResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def create_agency_custom_policy_async(self, request):
"""创建委托自定义策略
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)创建委托自定义策略。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param CreateAgencyCustomPolicyRequest request
:return: CreateAgencyCustomPolicyResponse
"""
return self.create_agency_custom_policy_with_http_info(request)
def create_agency_custom_policy_with_http_info(self, request):
"""创建委托自定义策略
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)创建委托自定义策略。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param CreateAgencyCustomPolicyRequest request
:return: CreateAgencyCustomPolicyResponse
"""
all_params = ['create_agency_custom_policy_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-ROLE/roles',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='CreateAgencyCustomPolicyResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def create_cloud_service_custom_policy_async(self, request):
"""创建云服务自定义策略
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)创建云服务自定义策略。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param CreateCloudServiceCustomPolicyRequest request
:return: CreateCloudServiceCustomPolicyResponse
"""
return self.create_cloud_service_custom_policy_with_http_info(request)
def create_cloud_service_custom_policy_with_http_info(self, request):
"""创建云服务自定义策略
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)创建云服务自定义策略。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param CreateCloudServiceCustomPolicyRequest request
:return: CreateCloudServiceCustomPolicyResponse
"""
all_params = ['create_cloud_service_custom_policy_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-ROLE/roles',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='CreateCloudServiceCustomPolicyResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def create_login_token_async(self, request):
"""获取自定义代理登录票据
该接口用于用于获取自定义代理登录票据logintoken。logintoken是系统颁发给自定义代理用户的登录票据,承载用户的身份、session等信息。调用自定义代理URL登录云服务控制台时,可以使用本接口获取的logintoken进行认证。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。 > - logintoken的有效期为10分钟。
:param CreateLoginTokenRequest request
:return: CreateLoginTokenResponse
"""
return self.create_login_token_with_http_info(request)
def create_login_token_with_http_info(self, request):
"""获取自定义代理登录票据
该接口用于用于获取自定义代理登录票据logintoken。logintoken是系统颁发给自定义代理用户的登录票据,承载用户的身份、session等信息。调用自定义代理URL登录云服务控制台时,可以使用本接口获取的logintoken进行认证。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。 > - logintoken的有效期为10分钟。
:param CreateLoginTokenRequest request
:return: CreateLoginTokenResponse
"""
all_params = ['create_login_token_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = ["X-Subject-LoginToken"]
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-AUTH/securitytoken/logintokens',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='CreateLoginTokenResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def create_metadata_async(self, request):
"""导入Metadata文件
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)导入Metadata文件。 账号在使用联邦认证功能前,需要先将Metadata文件导入到IAM中。Metadata文件是SAML 2.0协议约定的接口文件,包含访问接口地址和证书信息,请找企业管理员获取企业IdP的Metadata文件。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param CreateMetadataRequest request
:return: CreateMetadataResponse
"""
return self.create_metadata_with_http_info(request)
def create_metadata_with_http_info(self, request):
"""导入Metadata文件
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)导入Metadata文件。 账号在使用联邦认证功能前,需要先将Metadata文件导入到IAM中。Metadata文件是SAML 2.0协议约定的接口文件,包含访问接口地址和证书信息,请找企业管理员获取企业IdP的Metadata文件。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param CreateMetadataRequest request
:return: CreateMetadataResponse
"""
all_params = ['idp_id', 'protocol_id', 'create_metadata_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'idp_id' in local_var_params:
path_params['idp_id'] = local_var_params['idp_id']
if 'protocol_id' in local_var_params:
path_params['protocol_id'] = local_var_params['protocol_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3-ext/OS-FEDERATION/identity_providers/{idp_id}/protocols/{protocol_id}/metadata',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='CreateMetadataResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def create_open_id_connect_config_async(self, request):
"""创建OpenId Connect身份提供商配置
创建OpenId Connect身份提供商配置
:param CreateOpenIdConnectConfigRequest request
:return: CreateOpenIdConnectConfigResponse
"""
return self.create_open_id_connect_config_with_http_info(request)
def create_open_id_connect_config_with_http_info(self, request):
"""创建OpenId Connect身份提供商配置
创建OpenId Connect身份提供商配置
:param CreateOpenIdConnectConfigRequest request
:return: CreateOpenIdConnectConfigResponse
"""
all_params = ['idp_id', 'create_open_id_connect_config_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'idp_id' in local_var_params:
path_params['idp_id'] = local_var_params['idp_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-FEDERATION/identity-providers/{idp_id}/openid-connect-config',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='CreateOpenIdConnectConfigResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def create_token_with_id_token_async(self, request):
"""获取联邦认证token(OpenId Connect Id token方式)
获取联邦认证token(OpenId Connect Id token方式)
:param CreateTokenWithIdTokenRequest request
:return: CreateTokenWithIdTokenResponse
"""
return self.create_token_with_id_token_with_http_info(request)
def create_token_with_id_token_with_http_info(self, request):
"""获取联邦认证token(OpenId Connect Id token方式)
获取联邦认证token(OpenId Connect Id token方式)
:param CreateTokenWithIdTokenRequest request
:return: CreateTokenWithIdTokenResponse
"""
all_params = ['x_idp_id', 'body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'x_idp_id' in local_var_params:
header_params['X-Idp-Id'] = local_var_params['x_idp_id']
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = ["X-Subject-Token"]
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-AUTH/id-token/tokens',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='CreateTokenWithIdTokenResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def delete_agency_async(self, request):
"""删除委托
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)删除委托。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param DeleteAgencyRequest request
:return: DeleteAgencyResponse
"""
return self.delete_agency_with_http_info(request)
def delete_agency_with_http_info(self, request):
"""删除委托
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)删除委托。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param DeleteAgencyRequest request
:return: DeleteAgencyResponse
"""
all_params = ['agency_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'agency_id' in local_var_params:
path_params['agency_id'] = local_var_params['agency_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-AGENCY/agencies/{agency_id}',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='DeleteAgencyResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def delete_custom_policy_async(self, request):
"""删除自定义策略
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)删除自定义策略。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param DeleteCustomPolicyRequest request
:return: DeleteCustomPolicyResponse
"""
return self.delete_custom_policy_with_http_info(request)
def delete_custom_policy_with_http_info(self, request):
"""删除自定义策略
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)删除自定义策略。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param DeleteCustomPolicyRequest request
:return: DeleteCustomPolicyResponse
"""
all_params = ['role_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'role_id' in local_var_params:
path_params['role_id'] = local_var_params['role_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-ROLE/roles/{role_id}',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='DeleteCustomPolicyResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def delete_domain_group_inherited_role_async(self, request):
"""移除用户组的所有项目服务权限
该接口可以用于移除用户组的所有项目服务权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param DeleteDomainGroupInheritedRoleRequest request
:return: DeleteDomainGroupInheritedRoleResponse
"""
return self.delete_domain_group_inherited_role_with_http_info(request)
def delete_domain_group_inherited_role_with_http_info(self, request):
"""移除用户组的所有项目服务权限
该接口可以用于移除用户组的所有项目服务权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param DeleteDomainGroupInheritedRoleRequest request
:return: DeleteDomainGroupInheritedRoleResponse
"""
all_params = ['domain_id', 'group_id', 'role_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'domain_id' in local_var_params:
path_params['domain_id'] = local_var_params['domain_id']
if 'group_id' in local_var_params:
path_params['group_id'] = local_var_params['group_id']
if 'role_id' in local_var_params:
path_params['role_id'] = local_var_params['role_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/OS-INHERIT/domains/{domain_id}/groups/{group_id}/roles/{role_id}/inherited_to_projects',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='DeleteDomainGroupInheritedRoleResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_add_user_to_group_async(self, request):
"""添加IAM用户到用户组
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)添加IAM用户到用户组。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneAddUserToGroupRequest request
:return: KeystoneAddUserToGroupResponse
"""
return self.keystone_add_user_to_group_with_http_info(request)
def keystone_add_user_to_group_with_http_info(self, request):
"""添加IAM用户到用户组
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)添加IAM用户到用户组。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneAddUserToGroupRequest request
:return: KeystoneAddUserToGroupResponse
"""
all_params = ['group_id', 'user_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'group_id' in local_var_params:
path_params['group_id'] = local_var_params['group_id']
if 'user_id' in local_var_params:
path_params['user_id'] = local_var_params['user_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/groups/{group_id}/users/{user_id}',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneAddUserToGroupResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_associate_group_with_domain_permission_async(self, request):
"""为用户组授予全局服务权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)为用户组授予全局服务权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneAssociateGroupWithDomainPermissionRequest request
:return: KeystoneAssociateGroupWithDomainPermissionResponse
"""
return self.keystone_associate_group_with_domain_permission_with_http_info(request)
def keystone_associate_group_with_domain_permission_with_http_info(self, request):
"""为用户组授予全局服务权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)为用户组授予全局服务权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneAssociateGroupWithDomainPermissionRequest request
:return: KeystoneAssociateGroupWithDomainPermissionResponse
"""
all_params = ['domain_id', 'group_id', 'role_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'domain_id' in local_var_params:
path_params['domain_id'] = local_var_params['domain_id']
if 'group_id' in local_var_params:
path_params['group_id'] = local_var_params['group_id']
if 'role_id' in local_var_params:
path_params['role_id'] = local_var_params['role_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/domains/{domain_id}/groups/{group_id}/roles/{role_id}',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneAssociateGroupWithDomainPermissionResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_associate_group_with_project_permission_async(self, request):
"""为用户组授予项目服务权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)为用户组授予项目服务权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneAssociateGroupWithProjectPermissionRequest request
:return: KeystoneAssociateGroupWithProjectPermissionResponse
"""
return self.keystone_associate_group_with_project_permission_with_http_info(request)
def keystone_associate_group_with_project_permission_with_http_info(self, request):
"""为用户组授予项目服务权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)为用户组授予项目服务权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneAssociateGroupWithProjectPermissionRequest request
:return: KeystoneAssociateGroupWithProjectPermissionResponse
"""
all_params = ['project_id', 'group_id', 'role_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'project_id' in local_var_params:
path_params['project_id'] = local_var_params['project_id']
if 'group_id' in local_var_params:
path_params['group_id'] = local_var_params['group_id']
if 'role_id' in local_var_params:
path_params['role_id'] = local_var_params['role_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/projects/{project_id}/groups/{group_id}/roles/{role_id}',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneAssociateGroupWithProjectPermissionResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_check_domain_permission_for_group_async(self, request):
"""查询用户组是否拥有全局服务权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询用户组是否拥有全局服务权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneCheckDomainPermissionForGroupRequest request
:return: KeystoneCheckDomainPermissionForGroupResponse
"""
return self.keystone_check_domain_permission_for_group_with_http_info(request)
def keystone_check_domain_permission_for_group_with_http_info(self, request):
"""查询用户组是否拥有全局服务权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询用户组是否拥有全局服务权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneCheckDomainPermissionForGroupRequest request
:return: KeystoneCheckDomainPermissionForGroupResponse
"""
all_params = ['domain_id', 'group_id', 'role_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'domain_id' in local_var_params:
path_params['domain_id'] = local_var_params['domain_id']
if 'group_id' in local_var_params:
path_params['group_id'] = local_var_params['group_id']
if 'role_id' in local_var_params:
path_params['role_id'] = local_var_params['role_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/domains/{domain_id}/groups/{group_id}/roles/{role_id}',
method='HEAD',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneCheckDomainPermissionForGroupResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
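# Usage sketch (illustrative): this permission check is issued as an HTTP HEAD
# request, so the response carries no body and the outcome is conveyed by the
# HTTP status code. The request-model kwargs are assumed from the path
# parameters handled above; the IDs are placeholders.
#
#   request = KeystoneCheckDomainPermissionForGroupRequest(
#       domain_id="<domain_id>", group_id="<group_id>", role_id="<role_id>")
#   client.keystone_check_domain_permission_for_group_async(request)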
def keystone_check_project_permission_for_group_async(self, request):
"""查询用户组是否拥有项目服务权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询用户组是否拥有项目服务权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneCheckProjectPermissionForGroupRequest request
:return: KeystoneCheckProjectPermissionForGroupResponse
"""
return self.keystone_check_project_permission_for_group_with_http_info(request)
def keystone_check_project_permission_for_group_with_http_info(self, request):
"""查询用户组是否拥有项目服务权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询用户组是否拥有项目服务权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneCheckProjectPermissionForGroupRequest request
:return: KeystoneCheckProjectPermissionForGroupResponse
"""
all_params = ['project_id', 'group_id', 'role_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'project_id' in local_var_params:
path_params['project_id'] = local_var_params['project_id']
if 'group_id' in local_var_params:
path_params['group_id'] = local_var_params['group_id']
if 'role_id' in local_var_params:
path_params['role_id'] = local_var_params['role_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/projects/{project_id}/groups/{group_id}/roles/{role_id}',
method='HEAD',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneCheckProjectPermissionForGroupResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_check_user_in_group_async(self, request):
"""查询IAM用户是否在用户组中
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询IAM用户是否在用户组中。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneCheckUserInGroupRequest request
:return: KeystoneCheckUserInGroupResponse
"""
return self.keystone_check_user_in_group_with_http_info(request)
def keystone_check_user_in_group_with_http_info(self, request):
"""查询IAM用户是否在用户组中
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询IAM用户是否在用户组中。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneCheckUserInGroupRequest request
:return: KeystoneCheckUserInGroupResponse
"""
all_params = ['group_id', 'user_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'group_id' in local_var_params:
path_params['group_id'] = local_var_params['group_id']
if 'user_id' in local_var_params:
path_params['user_id'] = local_var_params['user_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/groups/{group_id}/users/{user_id}',
method='HEAD',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneCheckUserInGroupResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_checkrole_for_group_async(self, request):
"""查询用户组是否拥有所有项目指定权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询用户组是否拥有所有项目指定权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneCheckroleForGroupRequest request
:return: KeystoneCheckroleForGroupResponse
"""
return self.keystone_checkrole_for_group_with_http_info(request)
def keystone_checkrole_for_group_with_http_info(self, request):
"""查询用户组是否拥有所有项目指定权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询用户组是否拥有所有项目指定权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneCheckroleForGroupRequest request
:return: KeystoneCheckroleForGroupResponse
"""
all_params = ['domain_id', 'group_id', 'role_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'domain_id' in local_var_params:
path_params['domain_id'] = local_var_params['domain_id']
if 'group_id' in local_var_params:
path_params['group_id'] = local_var_params['group_id']
if 'role_id' in local_var_params:
path_params['role_id'] = local_var_params['role_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/OS-INHERIT/domains/{domain_id}/groups/{group_id}/roles/{role_id}/inherited_to_projects',
method='HEAD',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneCheckroleForGroupResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_create_group_async(self, request):
"""创建用户组
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)创建用户组。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneCreateGroupRequest request
:return: KeystoneCreateGroupResponse
"""
return self.keystone_create_group_with_http_info(request)
def keystone_create_group_with_http_info(self, request):
"""创建用户组
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)创建用户组。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneCreateGroupRequest request
:return: KeystoneCreateGroupResponse
"""
all_params = ['keystone_create_group_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3/groups',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneCreateGroupResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
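# Usage sketch (illustrative): creating a group via POST /v3/groups with a
# JSON body. The body-model names below (KeystoneCreateGroupRequestBody and
# KeystoneCreateGroupOption) are assumed from this SDK's generated-model
# naming convention; the group name is a placeholder.
#
#   body = KeystoneCreateGroupRequestBody(
#       group=KeystoneCreateGroupOption(name="<group_name>"))
#   request = KeystoneCreateGroupRequest(body=body)
#   response = client.keystone_create_group_async(request)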
def keystone_create_identity_provider_async(self, request):
"""注册身份提供商
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)注册身份提供商。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneCreateIdentityProviderRequest request
:return: KeystoneCreateIdentityProviderResponse
"""
return self.keystone_create_identity_provider_with_http_info(request)
def keystone_create_identity_provider_with_http_info(self, request):
"""注册身份提供商
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)注册身份提供商。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneCreateIdentityProviderRequest request
:return: KeystoneCreateIdentityProviderResponse
"""
all_params = ['id', 'keystone_create_identity_provider_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3/OS-FEDERATION/identity_providers/{id}',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneCreateIdentityProviderResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
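# Usage sketch (illustrative): note the HTTP method is PUT against
# /v3/OS-FEDERATION/identity_providers/{id}, so the caller chooses the
# provider id and re-issuing the call for the same id is idempotent. The
# body-model name is assumed from the SDK naming convention.
#
#   request = KeystoneCreateIdentityProviderRequest(
#       id="<idp_id>", body=KeystoneCreateIdentityProviderRequestBody(...))
#   response = client.keystone_create_identity_provider_async(request)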
def keystone_create_mapping_async(self, request):
"""注册映射
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)注册映射。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneCreateMappingRequest request
:return: KeystoneCreateMappingResponse
"""
return self.keystone_create_mapping_with_http_info(request)
def keystone_create_mapping_with_http_info(self, request):
"""注册映射
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)注册映射。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneCreateMappingRequest request
:return: KeystoneCreateMappingResponse
"""
all_params = ['id', 'keystone_create_mapping_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3/OS-FEDERATION/mappings/{id}',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneCreateMappingResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_create_project_async(self, request):
"""创建项目
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)创建项目。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneCreateProjectRequest request
:return: KeystoneCreateProjectResponse
"""
return self.keystone_create_project_with_http_info(request)
def keystone_create_project_with_http_info(self, request):
"""创建项目
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)创建项目。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneCreateProjectRequest request
:return: KeystoneCreateProjectResponse
"""
all_params = ['keystone_create_project_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3/projects',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneCreateProjectResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_create_protocol_async(self, request):
"""注册协议
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)注册协议(将协议关联到某一身份提供商)。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneCreateProtocolRequest request
:return: KeystoneCreateProtocolResponse
"""
return self.keystone_create_protocol_with_http_info(request)
def keystone_create_protocol_with_http_info(self, request):
"""注册协议
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)注册协议(将协议关联到某一身份提供商)。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneCreateProtocolRequest request
:return: KeystoneCreateProtocolResponse
"""
all_params = ['idp_id', 'protocol_id', 'keystone_create_protocol_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'idp_id' in local_var_params:
path_params['idp_id'] = local_var_params['idp_id']
if 'protocol_id' in local_var_params:
path_params['protocol_id'] = local_var_params['protocol_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3/OS-FEDERATION/identity_providers/{idp_id}/protocols/{protocol_id}',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneCreateProtocolResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
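# Usage sketch (illustrative): binding a protocol (for example "saml") and an
# existing mapping to an identity provider. The body-model names below are
# assumed from the SDK naming convention; the IDs are placeholders.
#
#   body = KeystoneCreateProtocolRequestBody(
#       protocol=ProtocolOption(mapping_id="<mapping_id>"))
#   request = KeystoneCreateProtocolRequest(
#       idp_id="<idp_id>", protocol_id="saml", body=body)
#   response = client.keystone_create_protocol_async(request)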
def keystone_create_scoped_token_async(self, request):
"""获取联邦认证scoped token
该接口可以用于通过联邦认证方式获取scoped token。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneCreateScopedTokenRequest request
:return: KeystoneCreateScopedTokenResponse
"""
return self.keystone_create_scoped_token_with_http_info(request)
def keystone_create_scoped_token_with_http_info(self, request):
"""获取联邦认证scoped token
该接口可以用于通过联邦认证方式获取scoped token。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneCreateScopedTokenRequest request
:return: KeystoneCreateScopedTokenResponse
"""
all_params = ['keystone_create_scoped_token_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = ["X-Subject-Token"]
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3/auth/tokens',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneCreateScopedTokenResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
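# Usage sketch (illustrative): unlike most calls in this client, the issued
# token is returned in the X-Subject-Token response header (declared in
# response_headers above), not in the response body. The snake_case attribute
# on the generated response object is assumed from the SDK convention.
#
#   request = KeystoneCreateScopedTokenRequest(body=...)
#   response = client.keystone_create_scoped_token_async(request)
#   token = response.x_subject_token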
def keystone_delete_group_async(self, request):
"""删除用户组
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)删除用户组。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneDeleteGroupRequest request
:return: KeystoneDeleteGroupResponse
"""
return self.keystone_delete_group_with_http_info(request)
def keystone_delete_group_with_http_info(self, request):
"""删除用户组
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)删除用户组。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneDeleteGroupRequest request
:return: KeystoneDeleteGroupResponse
"""
all_params = ['group_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'group_id' in local_var_params:
path_params['group_id'] = local_var_params['group_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/groups/{group_id}',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneDeleteGroupResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
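# Usage sketch (illustrative): deleting a group is a plain DELETE on
# /v3/groups/{group_id}; the response carries no payload beyond the status.
# The request-model kwarg is assumed from the path parameter handled above.
#
#   request = KeystoneDeleteGroupRequest(group_id="<group_id>")
#   client.keystone_delete_group_async(request)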
def keystone_delete_identity_provider_async(self, request):
"""删除身份提供商
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html) 删除身份提供商。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneDeleteIdentityProviderRequest request
:return: KeystoneDeleteIdentityProviderResponse
"""
return self.keystone_delete_identity_provider_with_http_info(request)
def keystone_delete_identity_provider_with_http_info(self, request):
"""删除身份提供商
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html) 删除身份提供商。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneDeleteIdentityProviderRequest request
:return: KeystoneDeleteIdentityProviderResponse
"""
all_params = ['id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/OS-FEDERATION/identity_providers/{id}',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneDeleteIdentityProviderResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_delete_mapping_async(self, request):
"""删除映射
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)删除映射。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneDeleteMappingRequest request
:return: KeystoneDeleteMappingResponse
"""
return self.keystone_delete_mapping_with_http_info(request)
def keystone_delete_mapping_with_http_info(self, request):
"""删除映射
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)删除映射。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneDeleteMappingRequest request
:return: KeystoneDeleteMappingResponse
"""
all_params = ['id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/OS-FEDERATION/mappings/{id}',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneDeleteMappingResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_delete_protocol_async(self, request):
"""删除协议
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)删除协议。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneDeleteProtocolRequest request
:return: KeystoneDeleteProtocolResponse
"""
return self.keystone_delete_protocol_with_http_info(request)
def keystone_delete_protocol_with_http_info(self, request):
"""删除协议
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)删除协议。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneDeleteProtocolRequest request
:return: KeystoneDeleteProtocolResponse
"""
all_params = ['idp_id', 'protocol_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'idp_id' in local_var_params:
path_params['idp_id'] = local_var_params['idp_id']
if 'protocol_id' in local_var_params:
path_params['protocol_id'] = local_var_params['protocol_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/OS-FEDERATION/identity_providers/{idp_id}/protocols/{protocol_id}',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneDeleteProtocolResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_list_all_project_permissions_for_group_async(self, request):
"""查询用户组的所有项目权限列表
该接口可以用于管理员查询用户组所有项目服务权限列表。 \\n\\n该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListAllProjectPermissionsForGroupRequest request
:return: KeystoneListAllProjectPermissionsForGroupResponse
"""
return self.keystone_list_all_project_permissions_for_group_with_http_info(request)
def keystone_list_all_project_permissions_for_group_with_http_info(self, request):
"""查询用户组的所有项目权限列表
该接口可以用于管理员查询用户组所有项目服务权限列表。 \\n\\n该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListAllProjectPermissionsForGroupRequest request
:return: KeystoneListAllProjectPermissionsForGroupResponse
"""
all_params = ['domain_id', 'group_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'domain_id' in local_var_params:
path_params['domain_id'] = local_var_params['domain_id']
if 'group_id' in local_var_params:
path_params['group_id'] = local_var_params['group_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/OS-INHERIT/domains/{domain_id}/groups/{group_id}/roles/inherited_to_projects',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneListAllProjectPermissionsForGroupResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
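# Usage sketch (illustrative): this call uses the OS-INHERIT extension path
# (.../roles/inherited_to_projects), i.e. it lists roles the group inherits
# across all projects under the domain rather than roles granted on a single
# project. Kwargs are assumed from the path parameters handled above.
#
#   request = KeystoneListAllProjectPermissionsForGroupRequest(
#       domain_id="<domain_id>", group_id="<group_id>")
#   response = client.keystone_list_all_project_permissions_for_group_async(request)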
def keystone_list_auth_domains_async(self, request):
"""查询IAM用户可以访问的账号详情
该接口可以用于查询IAM用户可以用访问的账号详情。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListAuthDomainsRequest request
:return: KeystoneListAuthDomainsResponse
"""
return self.keystone_list_auth_domains_with_http_info(request)
def keystone_list_auth_domains_with_http_info(self, request):
"""查询IAM用户可以访问的账号详情
该接口可以用于查询IAM用户可以用访问的账号详情。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListAuthDomainsRequest request
:return: KeystoneListAuthDomainsResponse
"""
all_params = []
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/auth/domains',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneListAuthDomainsResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_list_auth_projects_async(self, request):
"""查询IAM用户可以访问的项目列表
该接口可以用于查询IAM用户可以访问的项目列表。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListAuthProjectsRequest request
:return: KeystoneListAuthProjectsResponse
"""
return self.keystone_list_auth_projects_with_http_info(request)
def keystone_list_auth_projects_with_http_info(self, request):
"""查询IAM用户可以访问的项目列表
该接口可以用于查询IAM用户可以访问的项目列表。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListAuthProjectsRequest request
:return: KeystoneListAuthProjectsResponse
"""
all_params = []
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/auth/projects',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneListAuthProjectsResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_list_domain_permissions_for_group_async(self, request):
"""查询全局服务中的用户组权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询全局服务中的用户组权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListDomainPermissionsForGroupRequest request
:return: KeystoneListDomainPermissionsForGroupResponse
"""
return self.keystone_list_domain_permissions_for_group_with_http_info(request)
def keystone_list_domain_permissions_for_group_with_http_info(self, request):
"""查询全局服务中的用户组权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询全局服务中的用户组权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListDomainPermissionsForGroupRequest request
:return: KeystoneListDomainPermissionsForGroupResponse
"""
all_params = ['domain_id', 'group_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'domain_id' in local_var_params:
path_params['domain_id'] = local_var_params['domain_id']
if 'group_id' in local_var_params:
path_params['group_id'] = local_var_params['group_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/domains/{domain_id}/groups/{group_id}/roles',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneListDomainPermissionsForGroupResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_list_endpoints_async(self, request):
"""查询终端节点列表
该接口可以用于查询终端节点列表。终端节点用来提供服务访问入口。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListEndpointsRequest request
:return: KeystoneListEndpointsResponse
"""
return self.keystone_list_endpoints_with_http_info(request)
def keystone_list_endpoints_with_http_info(self, request):
"""查询终端节点列表
该接口可以用于查询终端节点列表。终端节点用来提供服务访问入口。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListEndpointsRequest request
:return: KeystoneListEndpointsResponse
"""
all_params = ['interface', 'service_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'interface' in local_var_params:
query_params.append(('interface', local_var_params['interface']))
if 'service_id' in local_var_params:
query_params.append(('service_id', local_var_params['service_id']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/endpoints',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneListEndpointsResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_list_groups_async(self, request):
"""查询用户组列表
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询用户组列表。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListGroupsRequest request
:return: KeystoneListGroupsResponse
"""
return self.keystone_list_groups_with_http_info(request)
def keystone_list_groups_with_http_info(self, request):
"""查询用户组列表
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询用户组列表。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListGroupsRequest request
:return: KeystoneListGroupsResponse
"""
all_params = ['domain_id', 'name']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'domain_id' in local_var_params:
query_params.append(('domain_id', local_var_params['domain_id']))
if 'name' in local_var_params:
query_params.append(('name', local_var_params['name']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/groups',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneListGroupsResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_list_identity_providers_async(self, request):
"""查询身份提供商列表
该接口可以用于查询身份提供商列表。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListIdentityProvidersRequest request
:return: KeystoneListIdentityProvidersResponse
"""
return self.keystone_list_identity_providers_with_http_info(request)
def keystone_list_identity_providers_with_http_info(self, request):
"""查询身份提供商列表
该接口可以用于查询身份提供商列表。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListIdentityProvidersRequest request
:return: KeystoneListIdentityProvidersResponse
"""
all_params = []
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/OS-FEDERATION/identity_providers',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneListIdentityProvidersResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_list_mappings_async(self, request):
"""查询映射列表
该接口可以用于查询映射列表。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListMappingsRequest request
:return: KeystoneListMappingsResponse
"""
return self.keystone_list_mappings_with_http_info(request)
def keystone_list_mappings_with_http_info(self, request):
"""查询映射列表
该接口可以用于查询映射列表。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListMappingsRequest request
:return: KeystoneListMappingsResponse
"""
all_params = []
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/OS-FEDERATION/mappings',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneListMappingsResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_list_permissions_async(self, request):
"""查询权限列表
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询权限列表。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListPermissionsRequest request
:return: KeystoneListPermissionsResponse
"""
return self.keystone_list_permissions_with_http_info(request)
def keystone_list_permissions_with_http_info(self, request):
"""查询权限列表
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询权限列表。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListPermissionsRequest request
:return: KeystoneListPermissionsResponse
"""
all_params = ['name', 'domain_id', 'page', 'per_page', 'permission_type', 'display_name', 'type', 'catalog']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'name' in local_var_params:
query_params.append(('name', local_var_params['name']))
if 'domain_id' in local_var_params:
query_params.append(('domain_id', local_var_params['domain_id']))
if 'page' in local_var_params:
query_params.append(('page', local_var_params['page']))
if 'per_page' in local_var_params:
query_params.append(('per_page', local_var_params['per_page']))
if 'permission_type' in local_var_params:
query_params.append(('permission_type', local_var_params['permission_type']))
if 'display_name' in local_var_params:
query_params.append(('display_name', local_var_params['display_name']))
if 'type' in local_var_params:
query_params.append(('type', local_var_params['type']))
if 'catalog' in local_var_params:
query_params.append(('catalog', local_var_params['catalog']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/roles',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneListPermissionsResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
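# Usage sketch (illustrative): every filter is an optional query parameter and
# is appended only when set on the request, so a bare request lists all
# permissions while `page`/`per_page` paginate and the other kwargs narrow the
# result. Kwargs are assumed from all_params above; values are placeholders.
#
#   request = KeystoneListPermissionsRequest(
#       display_name="<display_name>", page=1, per_page=50)
#   response = client.keystone_list_permissions_async(request)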
def keystone_list_project_permissions_for_group_async(self, request):
"""查询项目服务中的用户组权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询项目服务中的用户组权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListProjectPermissionsForGroupRequest request
:return: KeystoneListProjectPermissionsForGroupResponse
"""
return self.keystone_list_project_permissions_for_group_with_http_info(request)
def keystone_list_project_permissions_for_group_with_http_info(self, request):
"""查询项目服务中的用户组权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询项目服务中的用户组权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListProjectPermissionsForGroupRequest request
:return: KeystoneListProjectPermissionsForGroupResponse
"""
all_params = ['project_id', 'group_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'project_id' in local_var_params:
path_params['project_id'] = local_var_params['project_id']
if 'group_id' in local_var_params:
path_params['group_id'] = local_var_params['group_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/projects/{project_id}/groups/{group_id}/roles',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneListProjectPermissionsForGroupResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_list_projects_async(self, request):
"""查询指定条件下的项目列表
该接口可以用于查询指定条件下的项目列表。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListProjectsRequest request
:return: KeystoneListProjectsResponse
"""
return self.keystone_list_projects_with_http_info(request)
def keystone_list_projects_with_http_info(self, request):
"""查询指定条件下的项目列表
该接口可以用于查询指定条件下的项目列表。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListProjectsRequest request
:return: KeystoneListProjectsResponse
"""
all_params = ['domain_id', 'name', 'parent_id', 'enabled', 'is_domain', 'page', 'per_page']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'domain_id' in local_var_params:
query_params.append(('domain_id', local_var_params['domain_id']))
if 'name' in local_var_params:
query_params.append(('name', local_var_params['name']))
if 'parent_id' in local_var_params:
query_params.append(('parent_id', local_var_params['parent_id']))
if 'enabled' in local_var_params:
query_params.append(('enabled', local_var_params['enabled']))
if 'is_domain' in local_var_params:
query_params.append(('is_domain', local_var_params['is_domain']))
if 'page' in local_var_params:
query_params.append(('page', local_var_params['page']))
if 'per_page' in local_var_params:
query_params.append(('per_page', local_var_params['per_page']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/projects',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneListProjectsResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
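# Usage sketch (illustrative): filters compose freely because each query
# parameter is appended only when set on the request, e.g. the enabled
# projects of one domain filtered by name. Kwargs are assumed from all_params
# above; values are placeholders.
#
#   request = KeystoneListProjectsRequest(
#       domain_id="<domain_id>", name="<project_name>", enabled=True)
#   response = client.keystone_list_projects_async(request)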
def keystone_list_projects_for_user_async(self, request):
"""查询指定IAM用户的项目列表
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询指定IAM用户的项目列表,或IAM用户查询自己的项目列表。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListProjectsForUserRequest request
:return: KeystoneListProjectsForUserResponse
"""
return self.keystone_list_projects_for_user_with_http_info(request)
def keystone_list_projects_for_user_with_http_info(self, request):
"""查询指定IAM用户的项目列表
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询指定IAM用户的项目列表,或IAM用户查询自己的项目列表。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListProjectsForUserRequest request
:return: KeystoneListProjectsForUserResponse
"""
all_params = ['user_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'user_id' in local_var_params:
path_params['user_id'] = local_var_params['user_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/users/{user_id}/projects',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneListProjectsForUserResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_list_protocols_async(self, request):
"""查询协议列表
该接口可以用于查询协议列表。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListProtocolsRequest request
:return: KeystoneListProtocolsResponse
"""
return self.keystone_list_protocols_with_http_info(request)
def keystone_list_protocols_with_http_info(self, request):
"""查询协议列表
该接口可以用于查询协议列表。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListProtocolsRequest request
:return: KeystoneListProtocolsResponse
"""
all_params = ['idp_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'idp_id' in local_var_params:
path_params['idp_id'] = local_var_params['idp_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/OS-FEDERATION/identity_providers/{idp_id}/protocols',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneListProtocolsResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_list_regions_async(self, request):
"""查询区域列表
该接口可以用于查询区域列表。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListRegionsRequest request
:return: KeystoneListRegionsResponse
"""
return self.keystone_list_regions_with_http_info(request)
def keystone_list_regions_with_http_info(self, request):
"""查询区域列表
该接口可以用于查询区域列表。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListRegionsRequest request
:return: KeystoneListRegionsResponse
"""
all_params = []
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/regions',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneListRegionsResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_list_services_async(self, request):
"""查询服务列表
该接口可以用于查询服务列表。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListServicesRequest request
:return: KeystoneListServicesResponse
"""
return self.keystone_list_services_with_http_info(request)
def keystone_list_services_with_http_info(self, request):
"""查询服务列表
该接口可以用于查询服务列表。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListServicesRequest request
:return: KeystoneListServicesResponse
"""
all_params = ['type']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'type' in local_var_params:
query_params.append(('type', local_var_params['type']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/services',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneListServicesResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_list_users_for_group_by_admin_async(self, request):
"""管理员查询用户组所包含的IAM用户
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询用户组中所包含的IAM用户。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListUsersForGroupByAdminRequest request
:return: KeystoneListUsersForGroupByAdminResponse
"""
return self.keystone_list_users_for_group_by_admin_with_http_info(request)
def keystone_list_users_for_group_by_admin_with_http_info(self, request):
"""管理员查询用户组所包含的IAM用户
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询用户组中所包含的IAM用户。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListUsersForGroupByAdminRequest request
:return: KeystoneListUsersForGroupByAdminResponse
"""
all_params = ['group_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'group_id' in local_var_params:
path_params['group_id'] = local_var_params['group_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/groups/{group_id}/users',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneListUsersForGroupByAdminResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_list_versions_async(self, request):
"""查询版本信息列表
该接口用于查询Keystone API的版本信息。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListVersionsRequest request
:return: KeystoneListVersionsResponse
"""
return self.keystone_list_versions_with_http_info(request)
def keystone_list_versions_with_http_info(self, request):
"""查询版本信息列表
该接口用于查询Keystone API的版本信息。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListVersionsRequest request
:return: KeystoneListVersionsResponse
"""
all_params = []
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneListVersionsResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_remove_domain_permission_from_group_async(self, request):
"""移除用户组的全局服务权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)移除用户组的全局服务权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneRemoveDomainPermissionFromGroupRequest request
:return: KeystoneRemoveDomainPermissionFromGroupResponse
"""
return self.keystone_remove_domain_permission_from_group_with_http_info(request)
def keystone_remove_domain_permission_from_group_with_http_info(self, request):
"""移除用户组的全局服务权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)移除用户组的全局服务权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneRemoveDomainPermissionFromGroupRequest request
:return: KeystoneRemoveDomainPermissionFromGroupResponse
"""
all_params = ['domain_id', 'group_id', 'role_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'domain_id' in local_var_params:
path_params['domain_id'] = local_var_params['domain_id']
if 'group_id' in local_var_params:
path_params['group_id'] = local_var_params['group_id']
if 'role_id' in local_var_params:
path_params['role_id'] = local_var_params['role_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/domains/{domain_id}/groups/{group_id}/roles/{role_id}',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneRemoveDomainPermissionFromGroupResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_remove_project_permission_from_group_async(self, request):
"""移除用户组的项目服务权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)移除用户组的项目服务权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneRemoveProjectPermissionFromGroupRequest request
:return: KeystoneRemoveProjectPermissionFromGroupResponse
"""
return self.keystone_remove_project_permission_from_group_with_http_info(request)
def keystone_remove_project_permission_from_group_with_http_info(self, request):
"""移除用户组的项目服务权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)移除用户组的项目服务权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneRemoveProjectPermissionFromGroupRequest request
:return: KeystoneRemoveProjectPermissionFromGroupResponse
"""
all_params = ['project_id', 'group_id', 'role_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'project_id' in local_var_params:
path_params['project_id'] = local_var_params['project_id']
if 'group_id' in local_var_params:
path_params['group_id'] = local_var_params['group_id']
if 'role_id' in local_var_params:
path_params['role_id'] = local_var_params['role_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/projects/{project_id}/groups/{group_id}/roles/{role_id}',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneRemoveProjectPermissionFromGroupResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_remove_user_from_group_async(self, request):
"""移除用户组中的IAM用户
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)移除用户组中的IAM用户。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneRemoveUserFromGroupRequest request
:return: KeystoneRemoveUserFromGroupResponse
"""
return self.keystone_remove_user_from_group_with_http_info(request)
def keystone_remove_user_from_group_with_http_info(self, request):
"""移除用户组中的IAM用户
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)移除用户组中的IAM用户。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneRemoveUserFromGroupRequest request
:return: KeystoneRemoveUserFromGroupResponse
"""
all_params = ['group_id', 'user_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'group_id' in local_var_params:
path_params['group_id'] = local_var_params['group_id']
if 'user_id' in local_var_params:
path_params['user_id'] = local_var_params['user_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/groups/{group_id}/users/{user_id}',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneRemoveUserFromGroupResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
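# Usage sketch for a DELETE-style method (hypothetical IDs, and assuming the
# generated request model accepts these keyword arguments): both values are
# substituted into resource_path as path parameters and no body is sent.
#
#     request = KeystoneRemoveUserFromGroupRequest(group_id='<group_id>',
#                                                  user_id='<user_id>')
#     client.keystone_remove_user_from_group_async(request)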
def keystone_show_catalog_async(self, request):
"""查询服务目录
该接口可以用于查询请求头中X-Auth-Token对应的服务目录。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneShowCatalogRequest request
:return: KeystoneShowCatalogResponse
"""
return self.keystone_show_catalog_with_http_info(request)
def keystone_show_catalog_with_http_info(self, request):
"""查询服务目录
该接口可以用于查询请求头中X-Auth-Token对应的服务目录。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneShowCatalogRequest request
:return: KeystoneShowCatalogResponse
"""
all_params = []
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/auth/catalog',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneShowCatalogResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_show_endpoint_async(self, request):
"""查询终端节点详情
该接口可以用于查询终端节点详情。终端节点用来提供服务访问入口。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneShowEndpointRequest request
:return: KeystoneShowEndpointResponse
"""
return self.keystone_show_endpoint_with_http_info(request)
def keystone_show_endpoint_with_http_info(self, request):
"""查询终端节点详情
该接口可以用于查询终端节点详情。终端节点用来提供服务访问入口。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneShowEndpointRequest request
:return: KeystoneShowEndpointResponse
"""
all_params = ['endpoint_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'endpoint_id' in local_var_params:
path_params['endpoint_id'] = local_var_params['endpoint_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/endpoints/{endpoint_id}',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneShowEndpointResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
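# Usage sketch for a single-path-parameter GET (hypothetical endpoint ID):
# call_api substitutes path_params into the '{endpoint_id}' placeholder of
# resource_path.
#
#     request = KeystoneShowEndpointRequest(endpoint_id='<endpoint_id>')
#     response = client.keystone_show_endpoint_async(request)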
def keystone_show_group_async(self, request):
"""查询用户组详情
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询用户组详情。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneShowGroupRequest request
:return: KeystoneShowGroupResponse
"""
return self.keystone_show_group_with_http_info(request)
def keystone_show_group_with_http_info(self, request):
"""查询用户组详情
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询用户组详情。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneShowGroupRequest request
:return: KeystoneShowGroupResponse
"""
all_params = ['group_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'group_id' in local_var_params:
path_params['group_id'] = local_var_params['group_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/groups/{group_id}',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneShowGroupResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_show_identity_provider_async(self, request):
"""查询身份提供商详情
该接口可以用于查询身份提供商详情。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneShowIdentityProviderRequest request
:return: KeystoneShowIdentityProviderResponse
"""
return self.keystone_show_identity_provider_with_http_info(request)
def keystone_show_identity_provider_with_http_info(self, request):
"""查询身份提供商详情
该接口可以用于查询身份提供商详情。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneShowIdentityProviderRequest request
:return: KeystoneShowIdentityProviderResponse
"""
all_params = ['id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/OS-FEDERATION/identity_providers/{id}',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneShowIdentityProviderResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_show_mapping_async(self, request):
"""查询映射详情
该接口可以用于查询映射详情。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneShowMappingRequest request
:return: KeystoneShowMappingResponse
"""
return self.keystone_show_mapping_with_http_info(request)
def keystone_show_mapping_with_http_info(self, request):
"""查询映射详情
该接口可以用于查询映射详情。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneShowMappingRequest request
:return: KeystoneShowMappingResponse
"""
all_params = ['id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/OS-FEDERATION/mappings/{id}',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneShowMappingResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_show_permission_async(self, request):
"""查询权限详情
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询权限详情。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneShowPermissionRequest request
:return: KeystoneShowPermissionResponse
"""
return self.keystone_show_permission_with_http_info(request)
def keystone_show_permission_with_http_info(self, request):
"""查询权限详情
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询权限详情。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneShowPermissionRequest request
:return: KeystoneShowPermissionResponse
"""
all_params = ['role_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'role_id' in local_var_params:
path_params['role_id'] = local_var_params['role_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/roles/{role_id}',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneShowPermissionResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_show_project_async(self, request):
"""查询项目详情
该接口可以用于查询项目详情。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneShowProjectRequest request
:return: KeystoneShowProjectResponse
"""
return self.keystone_show_project_with_http_info(request)
def keystone_show_project_with_http_info(self, request):
"""查询项目详情
该接口可以用于查询项目详情。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneShowProjectRequest request
:return: KeystoneShowProjectResponse
"""
all_params = ['project_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'project_id' in local_var_params:
path_params['project_id'] = local_var_params['project_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/projects/{project_id}',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneShowProjectResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_show_protocol_async(self, request):
"""查询协议详情
该接口可以用于查询协议详情。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneShowProtocolRequest request
:return: KeystoneShowProtocolResponse
"""
return self.keystone_show_protocol_with_http_info(request)
def keystone_show_protocol_with_http_info(self, request):
"""查询协议详情
该接口可以用于查询协议详情。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneShowProtocolRequest request
:return: KeystoneShowProtocolResponse
"""
all_params = ['idp_id', 'protocol_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'idp_id' in local_var_params:
path_params['idp_id'] = local_var_params['idp_id']
if 'protocol_id' in local_var_params:
path_params['protocol_id'] = local_var_params['protocol_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/OS-FEDERATION/identity_providers/{idp_id}/protocols/{protocol_id}',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneShowProtocolResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_show_region_async(self, request):
"""查询区域详情
该接口可以用于查询区域详情。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneShowRegionRequest request
:return: KeystoneShowRegionResponse
"""
return self.keystone_show_region_with_http_info(request)
def keystone_show_region_with_http_info(self, request):
"""查询区域详情
该接口可以用于查询区域详情。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneShowRegionRequest request
:return: KeystoneShowRegionResponse
"""
all_params = ['region_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'region_id' in local_var_params:
path_params['region_id'] = local_var_params['region_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/regions/{region_id}',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneShowRegionResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_show_security_compliance_async(self, request):
"""查询账号密码强度策略
该接口可以用于查询账号密码强度策略,查询结果包括密码强度策略的正则表达式及其描述。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneShowSecurityComplianceRequest request
:return: KeystoneShowSecurityComplianceResponse
"""
return self.keystone_show_security_compliance_with_http_info(request)
def keystone_show_security_compliance_with_http_info(self, request):
"""查询账号密码强度策略
该接口可以用于查询账号密码强度策略,查询结果包括密码强度策略的正则表达式及其描述。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneShowSecurityComplianceRequest request
:return: KeystoneShowSecurityComplianceResponse
"""
all_params = ['domain_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'domain_id' in local_var_params:
path_params['domain_id'] = local_var_params['domain_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/domains/{domain_id}/config/security_compliance',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneShowSecurityComplianceResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_show_security_compliance_by_option_async(self, request):
"""按条件查询账号密码强度策略
该接口可以用于按条件查询账号密码强度策略,查询结果包括密码强度策略的正则表达式及其描述。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneShowSecurityComplianceByOptionRequest request
:return: KeystoneShowSecurityComplianceByOptionResponse
"""
return self.keystone_show_security_compliance_by_option_with_http_info(request)
def keystone_show_security_compliance_by_option_with_http_info(self, request):
"""按条件查询账号密码强度策略
该接口可以用于按条件查询账号密码强度策略,查询结果包括密码强度策略的正则表达式及其描述。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneShowSecurityComplianceByOptionRequest request
:return: KeystoneShowSecurityComplianceByOptionResponse
"""
all_params = ['domain_id', 'option']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'domain_id' in local_var_params:
path_params['domain_id'] = local_var_params['domain_id']
if 'option' in local_var_params:
path_params['option'] = local_var_params['option']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/domains/{domain_id}/config/security_compliance/{option}',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneShowSecurityComplianceByOptionResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_show_service_async(self, request):
"""查询服务详情
该接口可以用于查询服务详情。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneShowServiceRequest request
:return: KeystoneShowServiceResponse
"""
return self.keystone_show_service_with_http_info(request)
def keystone_show_service_with_http_info(self, request):
"""查询服务详情
该接口可以用于查询服务详情。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneShowServiceRequest request
:return: KeystoneShowServiceResponse
"""
all_params = ['service_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'service_id' in local_var_params:
path_params['service_id'] = local_var_params['service_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/services/{service_id}',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneShowServiceResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_show_version_async(self, request):
"""查询版本信息
该接口用于查询Keystone API的3.0版本的信息。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneShowVersionRequest request
:return: KeystoneShowVersionResponse
"""
return self.keystone_show_version_with_http_info(request)
def keystone_show_version_with_http_info(self, request):
"""查询版本信息
该接口用于查询Keystone API的3.0版本的信息。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneShowVersionRequest request
:return: KeystoneShowVersionResponse
"""
all_params = []
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneShowVersionResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_update_group_async(self, request):
"""更新用户组
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)更新用户组信息。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneUpdateGroupRequest request
:return: KeystoneUpdateGroupResponse
"""
return self.keystone_update_group_with_http_info(request)
def keystone_update_group_with_http_info(self, request):
"""更新用户组
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)更新用户组信息。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneUpdateGroupRequest request
:return: KeystoneUpdateGroupResponse
"""
all_params = ['group_id', 'keystone_update_group_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'group_id' in local_var_params:
path_params['group_id'] = local_var_params['group_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3/groups/{group_id}',
method='PATCH',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneUpdateGroupResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
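# The PATCH-style update methods differ from the GET/DELETE methods in two
# ways visible above: they copy local_var_params['body'] into body_params,
# and they advertise 'application/json;charset=UTF-8' as the Content-Type.
# A hedged usage sketch (the request body class name below is assumed from
# the SDK's naming convention for generated models):
#
#     request = KeystoneUpdateGroupRequest(group_id='<group_id>')
#     request.body = KeystoneUpdateGroupRequestBody(...)
#     response = client.keystone_update_group_async(request)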
def keystone_update_identity_provider_async(self, request):
"""更新身份提供商
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)更新身份提供商。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneUpdateIdentityProviderRequest request
:return: KeystoneUpdateIdentityProviderResponse
"""
return self.keystone_update_identity_provider_with_http_info(request)
def keystone_update_identity_provider_with_http_info(self, request):
"""更新身份提供商
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)更新身份提供商。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneUpdateIdentityProviderRequest request
:return: KeystoneUpdateIdentityProviderResponse
"""
all_params = ['id', 'keystone_update_identity_provider_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3/OS-FEDERATION/identity_providers/{id}',
method='PATCH',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneUpdateIdentityProviderResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_update_mapping_async(self, request):
"""更新映射
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)更新映射。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneUpdateMappingRequest request
:return: KeystoneUpdateMappingResponse
"""
return self.keystone_update_mapping_with_http_info(request)
def keystone_update_mapping_with_http_info(self, request):
"""更新映射
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)更新映射。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneUpdateMappingRequest request
:return: KeystoneUpdateMappingResponse
"""
all_params = ['id', 'keystone_update_mapping_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3/OS-FEDERATION/mappings/{id}',
method='PATCH',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneUpdateMappingResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_update_project_async(self, request):
"""修改项目信息
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)修改项目信息。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneUpdateProjectRequest request
:return: KeystoneUpdateProjectResponse
"""
return self.keystone_update_project_with_http_info(request)
def keystone_update_project_with_http_info(self, request):
"""修改项目信息
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)修改项目信息。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneUpdateProjectRequest request
:return: KeystoneUpdateProjectResponse
"""
all_params = ['project_id', 'keystone_update_project_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'project_id' in local_var_params:
path_params['project_id'] = local_var_params['project_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3/projects/{project_id}',
method='PATCH',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneUpdateProjectResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_update_protocol_async(self, request):
"""更新协议
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)更新协议。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneUpdateProtocolRequest request
:return: KeystoneUpdateProtocolResponse
"""
return self.keystone_update_protocol_with_http_info(request)
def keystone_update_protocol_with_http_info(self, request):
"""更新协议
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)更新协议。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneUpdateProtocolRequest request
:return: KeystoneUpdateProtocolResponse
"""
all_params = ['idp_id', 'protocol_id', 'keystone_update_protocol_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'idp_id' in local_var_params:
path_params['idp_id'] = local_var_params['idp_id']
if 'protocol_id' in local_var_params:
path_params['protocol_id'] = local_var_params['protocol_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3/OS-FEDERATION/identity_providers/{idp_id}/protocols/{protocol_id}',
method='PATCH',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneUpdateProtocolResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_agencies_async(self, request):
"""查询指定条件下的委托列表
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询指定条件下的委托列表。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ListAgenciesRequest request
:return: ListAgenciesResponse
"""
return self.list_agencies_with_http_info(request)
def list_agencies_with_http_info(self, request):
"""查询指定条件下的委托列表
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询指定条件下的委托列表。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ListAgenciesRequest request
:return: ListAgenciesResponse
"""
all_params = ['domain_id', 'trust_domain_id', 'name']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'domain_id' in local_var_params:
query_params.append(('domain_id', local_var_params['domain_id']))
if 'trust_domain_id' in local_var_params:
query_params.append(('trust_domain_id', local_var_params['trust_domain_id']))
if 'name' in local_var_params:
query_params.append(('name', local_var_params['name']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-AGENCY/agencies',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListAgenciesResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
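# Usage sketch for a filtered list call (hypothetical values): each optional
# filter is appended to query_params as a (name, value) tuple only when the
# corresponding attribute is present on the request, so omitted filters are
# simply not sent.
#
#     request = ListAgenciesRequest(domain_id='<domain_id>', name='<agency_name>')
#     response = client.list_agencies_async(request)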
def list_all_projects_permissions_for_agency_async(self, request):
"""查询委托下的所有项目服务权限列表
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询委托所有项目服务权限列表。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ListAllProjectsPermissionsForAgencyRequest request
:return: ListAllProjectsPermissionsForAgencyResponse
"""
return self.list_all_projects_permissions_for_agency_with_http_info(request)
def list_all_projects_permissions_for_agency_with_http_info(self, request):
"""查询委托下的所有项目服务权限列表
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询委托所有项目服务权限列表。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ListAllProjectsPermissionsForAgencyRequest request
:return: ListAllProjectsPermissionsForAgencyResponse
"""
all_params = ['agency_id', 'domain_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'agency_id' in local_var_params:
path_params['agency_id'] = local_var_params['agency_id']
if 'domain_id' in local_var_params:
path_params['domain_id'] = local_var_params['domain_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-INHERIT/domains/{domain_id}/agencies/{agency_id}/roles/inherited_to_projects',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListAllProjectsPermissionsForAgencyResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_custom_policies_async(self, request):
"""查询自定义策略列表
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询自定义策略列表。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ListCustomPoliciesRequest request
:return: ListCustomPoliciesResponse
"""
return self.list_custom_policies_with_http_info(request)
def list_custom_policies_with_http_info(self, request):
"""查询自定义策略列表
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询自定义策略列表。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ListCustomPoliciesRequest request
:return: ListCustomPoliciesResponse
"""
all_params = ['page', 'per_page']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'page' in local_var_params:
query_params.append(('page', local_var_params['page']))
if 'per_page' in local_var_params:
query_params.append(('per_page', local_var_params['per_page']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-ROLE/roles',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListCustomPoliciesResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
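# Usage sketch for the paginated list call (hypothetical values; page and
# per_page are optional pagination query parameters):
#
#     request = ListCustomPoliciesRequest(page=1, per_page=50)
#     response = client.list_custom_policies_async(request)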
def list_domain_permissions_for_agency_async(self, request):
"""查询全局服务中的委托权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询全局服务中的委托权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ListDomainPermissionsForAgencyRequest request
:return: ListDomainPermissionsForAgencyResponse
"""
return self.list_domain_permissions_for_agency_with_http_info(request)
def list_domain_permissions_for_agency_with_http_info(self, request):
"""查询全局服务中的委托权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询全局服务中的委托权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ListDomainPermissionsForAgencyRequest request
:return: ListDomainPermissionsForAgencyResponse
"""
all_params = ['domain_id', 'agency_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'domain_id' in local_var_params:
path_params['domain_id'] = local_var_params['domain_id']
if 'agency_id' in local_var_params:
path_params['agency_id'] = local_var_params['agency_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-AGENCY/domains/{domain_id}/agencies/{agency_id}/roles',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListDomainPermissionsForAgencyResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_project_permissions_for_agency_async(self, request):
"""查询项目服务中的委托权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询项目服务中的委托权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ListProjectPermissionsForAgencyRequest request
:return: ListProjectPermissionsForAgencyResponse
"""
return self.list_project_permissions_for_agency_with_http_info(request)
def list_project_permissions_for_agency_with_http_info(self, request):
"""查询项目服务中的委托权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询项目服务中的委托权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ListProjectPermissionsForAgencyRequest request
:return: ListProjectPermissionsForAgencyResponse
"""
all_params = ['project_id', 'agency_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'project_id' in local_var_params:
path_params['project_id'] = local_var_params['project_id']
if 'agency_id' in local_var_params:
path_params['agency_id'] = local_var_params['agency_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-AGENCY/projects/{project_id}/agencies/{agency_id}/roles',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListProjectPermissionsForAgencyResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def remove_all_projects_permission_from_agency_async(self, request):
"""移除委托下的所有项目服务权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)移除委托的所有项目服务权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param RemoveAllProjectsPermissionFromAgencyRequest request
:return: RemoveAllProjectsPermissionFromAgencyResponse
"""
return self.remove_all_projects_permission_from_agency_with_http_info(request)
def remove_all_projects_permission_from_agency_with_http_info(self, request):
"""移除委托下的所有项目服务权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)移除委托的所有项目服务权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param RemoveAllProjectsPermissionFromAgencyRequest request
:return: RemoveAllProjectsPermissionFromAgencyResponse
"""
all_params = ['agency_id', 'domain_id', 'role_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'agency_id' in local_var_params:
path_params['agency_id'] = local_var_params['agency_id']
if 'domain_id' in local_var_params:
path_params['domain_id'] = local_var_params['domain_id']
if 'role_id' in local_var_params:
path_params['role_id'] = local_var_params['role_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-INHERIT/domains/{domain_id}/agencies/{agency_id}/roles/{role_id}/inherited_to_projects',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='RemoveAllProjectsPermissionFromAgencyResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def remove_domain_permission_from_agency_async(self, request):
"""移除委托的全局服务权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)移除委托的全局服务权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param RemoveDomainPermissionFromAgencyRequest request
:return: RemoveDomainPermissionFromAgencyResponse
"""
return self.remove_domain_permission_from_agency_with_http_info(request)
def remove_domain_permission_from_agency_with_http_info(self, request):
"""移除委托的全局服务权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)移除委托的全局服务权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param RemoveDomainPermissionFromAgencyRequest request
:return: RemoveDomainPermissionFromAgencyResponse
"""
all_params = ['domain_id', 'agency_id', 'role_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'domain_id' in local_var_params:
path_params['domain_id'] = local_var_params['domain_id']
if 'agency_id' in local_var_params:
path_params['agency_id'] = local_var_params['agency_id']
if 'role_id' in local_var_params:
path_params['role_id'] = local_var_params['role_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-AGENCY/domains/{domain_id}/agencies/{agency_id}/roles/{role_id}',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='RemoveDomainPermissionFromAgencyResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def remove_project_permission_from_agency_async(self, request):
"""移除委托的项目服务权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)移除委托的项目服务权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param RemoveProjectPermissionFromAgencyRequest request
:return: RemoveProjectPermissionFromAgencyResponse
"""
return self.remove_project_permission_from_agency_with_http_info(request)
def remove_project_permission_from_agency_with_http_info(self, request):
"""移除委托的项目服务权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)移除委托的项目服务权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param RemoveProjectPermissionFromAgencyRequest request
:return: RemoveProjectPermissionFromAgencyResponse
"""
all_params = ['project_id', 'agency_id', 'role_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'project_id' in local_var_params:
path_params['project_id'] = local_var_params['project_id']
if 'agency_id' in local_var_params:
path_params['agency_id'] = local_var_params['agency_id']
if 'role_id' in local_var_params:
path_params['role_id'] = local_var_params['role_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-AGENCY/projects/{project_id}/agencies/{agency_id}/roles/{role_id}',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='RemoveProjectPermissionFromAgencyResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_agency_async(self, request):
"""查询委托详情
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询委托详情。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ShowAgencyRequest request
:return: ShowAgencyResponse
"""
return self.show_agency_with_http_info(request)
def show_agency_with_http_info(self, request):
"""查询委托详情
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询委托详情。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ShowAgencyRequest request
:return: ShowAgencyResponse
"""
all_params = ['agency_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'agency_id' in local_var_params:
path_params['agency_id'] = local_var_params['agency_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-AGENCY/agencies/{agency_id}',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowAgencyResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_custom_policy_async(self, request):
"""查询自定义策略详情
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询自定义策略详情。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ShowCustomPolicyRequest request
:return: ShowCustomPolicyResponse
"""
return self.show_custom_policy_with_http_info(request)
def show_custom_policy_with_http_info(self, request):
"""查询自定义策略详情
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询自定义策略详情。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ShowCustomPolicyRequest request
:return: ShowCustomPolicyResponse
"""
all_params = ['role_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'role_id' in local_var_params:
path_params['role_id'] = local_var_params['role_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-ROLE/roles/{role_id}',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowCustomPolicyResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_domain_api_acl_policy_async(self, request):
"""查询账号接口访问策略
该接口可以用于查询账号接口访问控制策略。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ShowDomainApiAclPolicyRequest request
:return: ShowDomainApiAclPolicyResponse
"""
return self.show_domain_api_acl_policy_with_http_info(request)
def show_domain_api_acl_policy_with_http_info(self, request):
"""查询账号接口访问策略
该接口可以用于查询账号接口访问控制策略。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ShowDomainApiAclPolicyRequest request
:return: ShowDomainApiAclPolicyResponse
"""
all_params = ['domain_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'domain_id' in local_var_params:
path_params['domain_id'] = local_var_params['domain_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-SECURITYPOLICY/domains/{domain_id}/api-acl-policy',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowDomainApiAclPolicyResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_domain_console_acl_policy_async(self, request):
"""查询账号控制台访问策略
该接口可以用于查询账号控制台访问控制策略。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ShowDomainConsoleAclPolicyRequest request
:return: ShowDomainConsoleAclPolicyResponse
"""
return self.show_domain_console_acl_policy_with_http_info(request)
def show_domain_console_acl_policy_with_http_info(self, request):
"""查询账号控制台访问策略
该接口可以用于查询账号控制台访问控制策略。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ShowDomainConsoleAclPolicyRequest request
:return: ShowDomainConsoleAclPolicyResponse
"""
all_params = ['domain_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'domain_id' in local_var_params:
path_params['domain_id'] = local_var_params['domain_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-SECURITYPOLICY/domains/{domain_id}/console-acl-policy',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowDomainConsoleAclPolicyResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_domain_login_policy_async(self, request):
"""查询账号登录策略
该接口可以用于查询账号登录策略。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ShowDomainLoginPolicyRequest request
:return: ShowDomainLoginPolicyResponse
"""
return self.show_domain_login_policy_with_http_info(request)
def show_domain_login_policy_with_http_info(self, request):
"""查询账号登录策略
该接口可以用于查询账号登录策略。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ShowDomainLoginPolicyRequest request
:return: ShowDomainLoginPolicyResponse
"""
all_params = ['domain_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'domain_id' in local_var_params:
path_params['domain_id'] = local_var_params['domain_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-SECURITYPOLICY/domains/{domain_id}/login-policy',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowDomainLoginPolicyResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_domain_password_policy_async(self, request):
"""查询账号密码策略
该接口可以用于查询账号密码策略。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ShowDomainPasswordPolicyRequest request
:return: ShowDomainPasswordPolicyResponse
"""
return self.show_domain_password_policy_with_http_info(request)
def show_domain_password_policy_with_http_info(self, request):
"""查询账号密码策略
该接口可以用于查询账号密码策略。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ShowDomainPasswordPolicyRequest request
:return: ShowDomainPasswordPolicyResponse
"""
all_params = ['domain_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'domain_id' in local_var_params:
path_params['domain_id'] = local_var_params['domain_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-SECURITYPOLICY/domains/{domain_id}/password-policy',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowDomainPasswordPolicyResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_domain_protect_policy_async(self, request):
"""查询账号操作保护策略
该接口可以用于查询账号操作保护策略。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ShowDomainProtectPolicyRequest request
:return: ShowDomainProtectPolicyResponse
"""
return self.show_domain_protect_policy_with_http_info(request)
def show_domain_protect_policy_with_http_info(self, request):
"""查询账号操作保护策略
该接口可以用于查询账号操作保护策略。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ShowDomainProtectPolicyRequest request
:return: ShowDomainProtectPolicyResponse
"""
all_params = ['domain_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'domain_id' in local_var_params:
path_params['domain_id'] = local_var_params['domain_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-SECURITYPOLICY/domains/{domain_id}/protect-policy',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowDomainProtectPolicyResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_domain_quota_async(self, request):
"""查询账号配额
该接口可以用于查询账号配额。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ShowDomainQuotaRequest request
:return: ShowDomainQuotaResponse
"""
return self.show_domain_quota_with_http_info(request)
def show_domain_quota_with_http_info(self, request):
"""查询账号配额
该接口可以用于查询账号配额。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ShowDomainQuotaRequest request
:return: ShowDomainQuotaResponse
"""
all_params = ['domain_id', 'type']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'domain_id' in local_var_params:
path_params['domain_id'] = local_var_params['domain_id']
query_params = []
if 'type' in local_var_params:
query_params.append(('type', local_var_params['type']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-QUOTA/domains/{domain_id}',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowDomainQuotaResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
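# Sketch of a quota query (illustrative names): as the code above shows,
# `type` is optional and is only appended to the query string when it is set
# on the request; `domain_id` is a path parameter.
#
#   from huaweicloudsdkiam.v3 import ShowDomainQuotaRequest
#
#   request = ShowDomainQuotaRequest(domain_id="<domain_id>", type="user")
#   response = client.show_domain_quota_async(request)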
def show_metadata_async(self, request):
"""查询Metadata文件
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询身份提供商导入到IAM中的Metadata文件。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ShowMetadataRequest request
:return: ShowMetadataResponse
"""
return self.show_metadata_with_http_info(request)
def show_metadata_with_http_info(self, request):
"""查询Metadata文件
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询身份提供商导入到IAM中的Metadata文件。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ShowMetadataRequest request
:return: ShowMetadataResponse
"""
all_params = ['idp_id', 'protocol_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'idp_id' in local_var_params:
path_params['idp_id'] = local_var_params['idp_id']
if 'protocol_id' in local_var_params:
path_params['protocol_id'] = local_var_params['protocol_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3-ext/OS-FEDERATION/identity_providers/{idp_id}/protocols/{protocol_id}/metadata',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowMetadataResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
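# Illustrative call with two path parameters (idp_id and protocol_id), both
# substituted into the resource path above; the model name and keyword
# arguments are assumptions based on the generated request classes.
#
#   from huaweicloudsdkiam.v3 import ShowMetadataRequest
#
#   request = ShowMetadataRequest(idp_id="<idp_id>", protocol_id="saml")
#   response = client.show_metadata_async(request)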
def show_open_id_connect_config_async(self, request):
"""查询OpenId Connect身份提供商配置
查询OpenId Connect身份提供商配置
:param ShowOpenIdConnectConfigRequest request
:return: ShowOpenIdConnectConfigResponse
"""
return self.show_open_id_connect_config_with_http_info(request)
def show_open_id_connect_config_with_http_info(self, request):
"""查询OpenId Connect身份提供商配置
查询OpenId Connect身份提供商配置
:param ShowOpenIdConnectConfigRequest request
:return: ShowOpenIdConnectConfigResponse
"""
all_params = ['idp_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'idp_id' in local_var_params:
path_params['idp_id'] = local_var_params['idp_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-FEDERATION/identity-providers/{idp_id}/openid-connect-config',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowOpenIdConnectConfigResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_project_details_and_status_async(self, request):
"""查询项目详情与状态
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询项目详情与状态。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ShowProjectDetailsAndStatusRequest request
:return: ShowProjectDetailsAndStatusResponse
"""
return self.show_project_details_and_status_with_http_info(request)
def show_project_details_and_status_with_http_info(self, request):
"""查询项目详情与状态
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询项目详情与状态。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ShowProjectDetailsAndStatusRequest request
:return: ShowProjectDetailsAndStatusResponse
"""
all_params = ['project_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'project_id' in local_var_params:
path_params['project_id'] = local_var_params['project_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3-ext/projects/{project_id}',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowProjectDetailsAndStatusResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_project_quota_async(self, request):
"""查询项目配额
该接口可以用于查询项目配额。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ShowProjectQuotaRequest request
:return: ShowProjectQuotaResponse
"""
return self.show_project_quota_with_http_info(request)
def show_project_quota_with_http_info(self, request):
"""查询项目配额
该接口可以用于查询项目配额。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ShowProjectQuotaRequest request
:return: ShowProjectQuotaResponse
"""
all_params = ['project_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'project_id' in local_var_params:
path_params['project_id'] = local_var_params['project_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-QUOTA/projects/{project_id}',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowProjectQuotaResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def update_agency_async(self, request):
"""修改委托
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)修改委托。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param UpdateAgencyRequest request
:return: UpdateAgencyResponse
"""
return self.update_agency_with_http_info(request)
def update_agency_with_http_info(self, request):
"""修改委托
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)修改委托。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param UpdateAgencyRequest request
:return: UpdateAgencyResponse
"""
all_params = ['agency_id', 'update_agency_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'agency_id' in local_var_params:
path_params['agency_id'] = local_var_params['agency_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-AGENCY/agencies/{agency_id}',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='UpdateAgencyResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
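# Update sketch (assumed model names): PUT requests such as this one send the
# payload from `request.body`, serialized as application/json;charset=UTF-8
# per the Content-Type selected above.
#
#   from huaweicloudsdkiam.v3 import UpdateAgencyRequest
#
#   request = UpdateAgencyRequest(agency_id="<agency_id>")
#   request.body = ...  # an UpdateAgencyRequestBody built from the generated models
#   response = client.update_agency_async(request)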
def update_agency_custom_policy_async(self, request):
"""修改委托自定义策略
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)修改委托自定义策略。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param UpdateAgencyCustomPolicyRequest request
:return: UpdateAgencyCustomPolicyResponse
"""
return self.update_agency_custom_policy_with_http_info(request)
def update_agency_custom_policy_with_http_info(self, request):
"""修改委托自定义策略
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)修改委托自定义策略。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param UpdateAgencyCustomPolicyRequest request
:return: UpdateAgencyCustomPolicyResponse
"""
all_params = ['role_id', 'update_agency_custom_policy_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'role_id' in local_var_params:
path_params['role_id'] = local_var_params['role_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-ROLE/roles/{role_id}',
method='PATCH',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='UpdateAgencyCustomPolicyResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def update_cloud_service_custom_policy_async(self, request):
"""修改云服务自定义策略
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)修改云服务自定义策略。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param UpdateCloudServiceCustomPolicyRequest request
:return: UpdateCloudServiceCustomPolicyResponse
"""
return self.update_cloud_service_custom_policy_with_http_info(request)
def update_cloud_service_custom_policy_with_http_info(self, request):
"""修改云服务自定义策略
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)修改云服务自定义策略。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param UpdateCloudServiceCustomPolicyRequest request
:return: UpdateCloudServiceCustomPolicyResponse
"""
all_params = ['role_id', 'update_cloud_service_custom_policy_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'role_id' in local_var_params:
path_params['role_id'] = local_var_params['role_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-ROLE/roles/{role_id}',
method='PATCH',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='UpdateCloudServiceCustomPolicyResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def update_domain_api_acl_policy_async(self, request):
"""修改账号接口访问策略
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)修改账号接口访问策略。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param UpdateDomainApiAclPolicyRequest request
:return: UpdateDomainApiAclPolicyResponse
"""
return self.update_domain_api_acl_policy_with_http_info(request)
def update_domain_api_acl_policy_with_http_info(self, request):
"""修改账号接口访问策略
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)修改账号接口访问策略。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param UpdateDomainApiAclPolicyRequest request
:return: UpdateDomainApiAclPolicyResponse
"""
all_params = ['domain_id', 'update_domain_api_acl_policy_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'domain_id' in local_var_params:
path_params['domain_id'] = local_var_params['domain_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-SECURITYPOLICY/domains/{domain_id}/api-acl-policy',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='UpdateDomainApiAclPolicyResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def update_domain_console_acl_policy_async(self, request):
"""修改账号控制台访问策略
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)修改账号控制台访问策略。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param UpdateDomainConsoleAclPolicyRequest request
:return: UpdateDomainConsoleAclPolicyResponse
"""
return self.update_domain_console_acl_policy_with_http_info(request)
def update_domain_console_acl_policy_with_http_info(self, request):
"""修改账号控制台访问策略
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)修改账号控制台访问策略。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param UpdateDomainConsoleAclPolicyRequest request
:return: UpdateDomainConsoleAclPolicyResponse
"""
all_params = ['domain_id', 'update_domain_console_acl_policy_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'domain_id' in local_var_params:
path_params['domain_id'] = local_var_params['domain_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-SECURITYPOLICY/domains/{domain_id}/console-acl-policy',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='UpdateDomainConsoleAclPolicyResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def update_domain_group_inherit_role_async(self, request):
"""为用户组授予所有项目服务权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/zh-cn_topic_0079496985.html)为用户组授予所有项目服务权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param UpdateDomainGroupInheritRoleRequest request
:return: UpdateDomainGroupInheritRoleResponse
"""
return self.update_domain_group_inherit_role_with_http_info(request)
def update_domain_group_inherit_role_with_http_info(self, request):
"""为用户组授予所有项目服务权限
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/zh-cn_topic_0079496985.html)为用户组授予所有项目服务权限。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param UpdateDomainGroupInheritRoleRequest request
:return: UpdateDomainGroupInheritRoleResponse
"""
all_params = ['domain_id', 'group_id', 'role_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'domain_id' in local_var_params:
path_params['domain_id'] = local_var_params['domain_id']
if 'group_id' in local_var_params:
path_params['group_id'] = local_var_params['group_id']
if 'role_id' in local_var_params:
path_params['role_id'] = local_var_params['role_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/OS-INHERIT/domains/{domain_id}/groups/{group_id}/roles/{role_id}/inherited_to_projects',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='UpdateDomainGroupInheritRoleResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
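# Illustrative grant (names assumed): this endpoint is a body-less PUT; the
# three IDs are all path parameters of the inherited_to_projects route above.
#
#   from huaweicloudsdkiam.v3 import UpdateDomainGroupInheritRoleRequest
#
#   request = UpdateDomainGroupInheritRoleRequest(
#       domain_id="<domain_id>", group_id="<group_id>", role_id="<role_id>")
#   response = client.update_domain_group_inherit_role_async(request)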
def update_domain_login_policy_async(self, request):
"""修改账号登录策略
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)修改账号登录策略。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param UpdateDomainLoginPolicyRequest request
:return: UpdateDomainLoginPolicyResponse
"""
return self.update_domain_login_policy_with_http_info(request)
def update_domain_login_policy_with_http_info(self, request):
"""修改账号登录策略
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)修改账号登录策略。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param UpdateDomainLoginPolicyRequest request
:return: UpdateDomainLoginPolicyResponse
"""
all_params = ['domain_id', 'update_domain_login_policy_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'domain_id' in local_var_params:
path_params['domain_id'] = local_var_params['domain_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-SECURITYPOLICY/domains/{domain_id}/login-policy',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='UpdateDomainLoginPolicyResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def update_domain_password_policy_async(self, request):
"""修改账号密码策略
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)修改账号密码策略。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param UpdateDomainPasswordPolicyRequest request
:return: UpdateDomainPasswordPolicyResponse
"""
return self.update_domain_password_policy_with_http_info(request)
def update_domain_password_policy_with_http_info(self, request):
"""修改账号密码策略
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)修改账号密码策略。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param UpdateDomainPasswordPolicyRequest request
:return: UpdateDomainPasswordPolicyResponse
"""
all_params = ['domain_id', 'update_domain_password_policy_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'domain_id' in local_var_params:
path_params['domain_id'] = local_var_params['domain_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-SECURITYPOLICY/domains/{domain_id}/password-policy',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='UpdateDomainPasswordPolicyResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
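# Password-policy update sketch; the body model and its fields are
# assumptions to be checked against the generated model classes.
#
#   from huaweicloudsdkiam.v3 import UpdateDomainPasswordPolicyRequest
#
#   request = UpdateDomainPasswordPolicyRequest(domain_id="<domain_id>")
#   request.body = ...  # an UpdateDomainPasswordPolicyRequestBody instance
#   response = client.update_domain_password_policy_async(request)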
def update_domain_protect_policy_async(self, request):
"""修改账号操作保护策略
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)修改账号操作保护策略。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param UpdateDomainProtectPolicyRequest request
:return: UpdateDomainProtectPolicyResponse
"""
return self.update_domain_protect_policy_with_http_info(request)
def update_domain_protect_policy_with_http_info(self, request):
"""修改账号操作保护策略
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)修改账号操作保护策略。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param UpdateDomainProtectPolicyRequest request
:return: UpdateDomainProtectPolicyResponse
"""
all_params = ['domain_id', 'update_domain_protect_policy_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'domain_id' in local_var_params:
path_params['domain_id'] = local_var_params['domain_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-SECURITYPOLICY/domains/{domain_id}/protect-policy',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='UpdateDomainProtectPolicyResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def update_open_id_connect_config_async(self, request):
"""修改OpenId Connect身份提供商配置
修改OpenId Connect身份提供商配置
:param UpdateOpenIdConnectConfigRequest request
:return: UpdateOpenIdConnectConfigResponse
"""
return self.update_open_id_connect_config_with_http_info(request)
def update_open_id_connect_config_with_http_info(self, request):
"""修改OpenId Connect身份提供商配置
修改OpenId Connect身份提供商配置
:param UpdateOpenIdConnectConfigRequest request
:return: UpdateOpenIdConnectConfigResponse
"""
all_params = ['idp_id', 'update_open_id_connect_config_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'idp_id' in local_var_params:
path_params['idp_id'] = local_var_params['idp_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-FEDERATION/identity-providers/{idp_id}/openid-connect-config',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='UpdateOpenIdConnectConfigResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def update_project_status_async(self, request):
"""设置项目状态
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)设置项目状态。项目状态包括:正常、冻结。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param UpdateProjectStatusRequest request
:return: UpdateProjectStatusResponse
"""
return self.update_project_status_with_http_info(request)
def update_project_status_with_http_info(self, request):
"""设置项目状态
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)设置项目状态。项目状态包括:正常、冻结。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param UpdateProjectStatusRequest request
:return: UpdateProjectStatusResponse
"""
all_params = ['project_id', 'update_project_status_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'project_id' in local_var_params:
path_params['project_id'] = local_var_params['project_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3-ext/projects/{project_id}',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='UpdateProjectStatusResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
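# Illustrative status change; per the docstring a project can be set to the
# normal or suspended state through the request body (model names assumed).
#
#   from huaweicloudsdkiam.v3 import UpdateProjectStatusRequest
#
#   request = UpdateProjectStatusRequest(project_id="<project_id>")
#   request.body = ...  # an UpdateProjectStatusRequestBody carrying the target status
#   response = client.update_project_status_async(request)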
def create_permanent_access_key_async(self, request):
"""创建永久访问密钥
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)给IAM用户创建永久访问密钥,或IAM用户给自己创建永久访问密钥。 访问密钥(Access Key ID/Secret Access Key,简称AK/SK),是您通过开发工具(API、CLI、SDK)访问华为云时的身份凭证,不用于登录控制台。系统通过AK识别访问用户的身份,通过SK进行签名验证,通过加密签名验证可以确保请求的机密性、完整性和请求者身份的正确性。在控制台创建访问密钥的方式请参见:[访问密钥](https://support.huaweicloud.com/usermanual-ca/zh-cn_topic_0046606340.html) 。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param CreatePermanentAccessKeyRequest request
:return: CreatePermanentAccessKeyResponse
"""
return self.create_permanent_access_key_with_http_info(request)
def create_permanent_access_key_with_http_info(self, request):
"""创建永久访问密钥
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)给IAM用户创建永久访问密钥,或IAM用户给自己创建永久访问密钥。 访问密钥(Access Key ID/Secret Access Key,简称AK/SK),是您通过开发工具(API、CLI、SDK)访问华为云时的身份凭证,不用于登录控制台。系统通过AK识别访问用户的身份,通过SK进行签名验证,通过加密签名验证可以确保请求的机密性、完整性和请求者身份的正确性。在控制台创建访问密钥的方式请参见:[访问密钥](https://support.huaweicloud.com/usermanual-ca/zh-cn_topic_0046606340.html) 。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param CreatePermanentAccessKeyRequest request
:return: CreatePermanentAccessKeyResponse
"""
all_params = ['create_permanent_access_key_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-CREDENTIAL/credentials',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='CreatePermanentAccessKeyResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
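# AK/SK creation sketch (assumed model names): the POST body names the IAM
# user the key is created for, and the response carries the new credential.
#
#   from huaweicloudsdkiam.v3 import CreatePermanentAccessKeyRequest
#
#   request = CreatePermanentAccessKeyRequest()
#   request.body = ...  # a CreatePermanentAccessKeyRequestBody naming the user
#   response = client.create_permanent_access_key_async(request)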
def create_temporary_access_key_by_agency_async(self, request):
"""通过委托获取临时访问密钥
该接口可以用于通过委托来获取临时访问密钥(临时AK/SK)和securitytoken。 临时AK/SK和securitytoken是系统颁发给IAM用户的临时访问令牌,有效期为15分钟至24小时,过期后需要重新获取。临时AK/SK和securitytoken遵循权限最小化原则。鉴权时,临时AK/SK和securitytoken必须同时使用,请求头中需要添加“x-security-token”字段,使用方法详情请参考:[API签名参考](https://support.huaweicloud.com/devg-apisign/api-sign-provide.html) 。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param CreateTemporaryAccessKeyByAgencyRequest request
:return: CreateTemporaryAccessKeyByAgencyResponse
"""
return self.create_temporary_access_key_by_agency_with_http_info(request)
def create_temporary_access_key_by_agency_with_http_info(self, request):
"""通过委托获取临时访问密钥
该接口可以用于通过委托来获取临时访问密钥(临时AK/SK)和securitytoken。 临时AK/SK和securitytoken是系统颁发给IAM用户的临时访问令牌,有效期为15分钟至24小时,过期后需要重新获取。临时AK/SK和securitytoken遵循权限最小化原则。鉴权时,临时AK/SK和securitytoken必须同时使用,请求头中需要添加“x-security-token”字段,使用方法详情请参考:[API签名参考](https://support.huaweicloud.com/devg-apisign/api-sign-provide.html) 。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param CreateTemporaryAccessKeyByAgencyRequest request
:return: CreateTemporaryAccessKeyByAgencyResponse
"""
all_params = ['create_temporary_access_key_by_agency_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-CREDENTIAL/securitytokens',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='CreateTemporaryAccessKeyByAgencyResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
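# Illustrative agency-based token request: the body identifies the agency to
# assume, and the response carries the temporary AK/SK plus securitytoken
# described in the docstring (model names are assumptions).
#
#   from huaweicloudsdkiam.v3 import CreateTemporaryAccessKeyByAgencyRequest
#
#   request = CreateTemporaryAccessKeyByAgencyRequest()
#   request.body = ...  # body naming the agency to assume
#   response = client.create_temporary_access_key_by_agency_async(request)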
def create_temporary_access_key_by_token_async(self, request):
"""通过token获取临时访问密钥
该接口可以用于通过token来获取临时AK/SK和securitytoken。 临时AK/SK和securitytoken是系统颁发给IAM用户的临时访问令牌,有效期为15分钟至24小时,过期后需要重新获取。临时AK/SK和securitytoken遵循权限最小化原则。鉴权时,临时AK/SK和securitytoken必须同时使用,请求头中需要添加“x-security-token”字段,使用方法详情请参考:[API签名参考](https://support.huaweicloud.com/devg-apisign/api-sign-provide.html)。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param CreateTemporaryAccessKeyByTokenRequest request
:return: CreateTemporaryAccessKeyByTokenResponse
"""
return self.create_temporary_access_key_by_token_with_http_info(request)
def create_temporary_access_key_by_token_with_http_info(self, request):
"""通过token获取临时访问密钥
该接口可以用于通过token来获取临时AK/SK和securitytoken。 临时AK/SK和securitytoken是系统颁发给IAM用户的临时访问令牌,有效期为15分钟至24小时,过期后需要重新获取。临时AK/SK和securitytoken遵循权限最小化原则。鉴权时,临时AK/SK和securitytoken必须同时使用,请求头中需要添加“x-security-token”字段,使用方法详情请参考:[API签名参考](https://support.huaweicloud.com/devg-apisign/api-sign-provide.html)。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param CreateTemporaryAccessKeyByTokenRequest request
:return: CreateTemporaryAccessKeyByTokenResponse
"""
all_params = ['create_temporary_access_key_by_token_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-CREDENTIAL/securitytokens',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='CreateTemporaryAccessKeyByTokenResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
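# When the returned temporary credentials are used to sign later requests,
# the securitytoken travels in the "x-security-token" header alongside the
# AK/SK signature, per the docstring above. A hypothetical header set:
#
#   headers = {
#       "x-security-token": "<securitytoken from the response>",
#       # plus the usual AK/SK signature headers produced by the signing SDK
#   }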
def delete_permanent_access_key_async(self, request):
"""删除指定永久访问密钥
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)删除IAM用户的指定永久访问密钥,或IAM用户删除自己的指定永久访问密钥。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param DeletePermanentAccessKeyRequest request
:return: DeletePermanentAccessKeyResponse
"""
return self.delete_permanent_access_key_with_http_info(request)
def delete_permanent_access_key_with_http_info(self, request):
"""删除指定永久访问密钥
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)删除IAM用户的指定永久访问密钥,或IAM用户删除自己的指定永久访问密钥。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param DeletePermanentAccessKeyRequest request
:return: DeletePermanentAccessKeyResponse
"""
all_params = ['access_key']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'access_key' in local_var_params:
path_params['access_key'] = local_var_params['access_key']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-CREDENTIAL/credentials/{access_key}',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='DeletePermanentAccessKeyResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_permanent_access_keys_async(self, request):
"""查询所有永久访问密钥
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询IAM用户的所有永久访问密钥,或IAM用户查询自己的所有永久访问密钥。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ListPermanentAccessKeysRequest request
:return: ListPermanentAccessKeysResponse
"""
return self.list_permanent_access_keys_with_http_info(request)
def list_permanent_access_keys_with_http_info(self, request):
"""查询所有永久访问密钥
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询IAM用户的所有永久访问密钥,或IAM用户查询自己的所有永久访问密钥。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ListPermanentAccessKeysRequest request
:return: ListPermanentAccessKeysResponse
"""
all_params = ['user_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'user_id' in local_var_params:
query_params.append(('user_id', local_var_params['user_id']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-CREDENTIAL/credentials',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListPermanentAccessKeysResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
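# Listing sketch: user_id is an optional query parameter, so an administrator
# can list another user's keys while an IAM user omits it to list their own
# (client and model names are illustrative).
#
#   from huaweicloudsdkiam.v3 import ListPermanentAccessKeysRequest
#
#   request = ListPermanentAccessKeysRequest(user_id="<user_id>")
#   response = client.list_permanent_access_keys_async(request)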
def show_permanent_access_key_async(self, request):
"""查询指定永久访问密钥
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询IAM用户的指定永久访问密钥,或IAM用户查询自己的指定永久访问密钥。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ShowPermanentAccessKeyRequest request
:return: ShowPermanentAccessKeyResponse
"""
return self.show_permanent_access_key_with_http_info(request)
def show_permanent_access_key_with_http_info(self, request):
"""查询指定永久访问密钥
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询IAM用户的指定永久访问密钥,或IAM用户查询自己的指定永久访问密钥。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ShowPermanentAccessKeyRequest request
:return: ShowPermanentAccessKeyResponse
"""
all_params = ['access_key']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'access_key' in local_var_params:
path_params['access_key'] = local_var_params['access_key']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-CREDENTIAL/credentials/{access_key}',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowPermanentAccessKeyResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def update_permanent_access_key_async(self, request):
"""修改指定永久访问密钥
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)修改IAM用户的指定永久访问密钥,或IAM用户修改自己的指定永久访问密钥。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param UpdatePermanentAccessKeyRequest request
:return: UpdatePermanentAccessKeyResponse
"""
return self.update_permanent_access_key_with_http_info(request)
def update_permanent_access_key_with_http_info(self, request):
"""修改指定永久访问密钥
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)修改IAM用户的指定永久访问密钥,或IAM用户修改自己的指定永久访问密钥。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param UpdatePermanentAccessKeyRequest request
:return: UpdatePermanentAccessKeyResponse
"""
all_params = ['access_key', 'update_permanent_access_key_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'access_key' in local_var_params:
path_params['access_key'] = local_var_params['access_key']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-CREDENTIAL/credentials/{access_key}',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='UpdatePermanentAccessKeyResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def create_binding_device_async(self, request):
"""绑定MFA设备
该接口可以用于绑定MFA设备。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param CreateBindingDeviceRequest request
:return: CreateBindingDeviceResponse
"""
return self.create_binding_device_with_http_info(request)
def create_binding_device_with_http_info(self, request):
"""绑定MFA设备
该接口可以用于绑定MFA设备。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param CreateBindingDeviceRequest request
:return: CreateBindingDeviceResponse
"""
all_params = ['create_binding_device_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-MFA/mfa-devices/bind',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='CreateBindingDeviceResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def create_mfa_device_async(self, request):
"""创建MFA设备
该接口可以用于创建MFA设备。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param CreateMfaDeviceRequest request
:return: CreateMfaDeviceResponse
"""
return self.create_mfa_device_with_http_info(request)
def create_mfa_device_with_http_info(self, request):
"""创建MFA设备
该接口可以用于创建MFA设备。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param CreateMfaDeviceRequest request
:return: CreateMfaDeviceResponse
"""
all_params = ['create_mfa_device_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-MFA/virtual-mfa-devices',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='CreateMfaDeviceResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
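# Virtual MFA creation sketch (assumed model names): the POST body supplies
# the device name and the user it belongs to.
#
#   from huaweicloudsdkiam.v3 import CreateMfaDeviceRequest
#
#   request = CreateMfaDeviceRequest()
#   request.body = ...  # a body model with the device name and user_id
#   response = client.create_mfa_device_async(request)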
def create_user_async(self, request):
"""管理员创建IAM用户(推荐)
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)创建IAM用户。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param CreateUserRequest request
:return: CreateUserResponse
"""
return self.create_user_with_http_info(request)
def create_user_with_http_info(self, request):
"""管理员创建IAM用户(推荐)
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)创建IAM用户。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param CreateUserRequest request
:return: CreateUserResponse
"""
all_params = ['create_user_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-USER/users',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='CreateUserResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def delete_binding_device_async(self, request):
"""解绑MFA设备
该接口可以用于解绑MFA设备 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param DeleteBindingDeviceRequest request
:return: DeleteBindingDeviceResponse
"""
return self.delete_binding_device_with_http_info(request)
def delete_binding_device_with_http_info(self, request):
"""解绑MFA设备
该接口可以用于解绑MFA设备 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param DeleteBindingDeviceRequest request
:return: DeleteBindingDeviceResponse
"""
all_params = ['delete_binding_device_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-MFA/mfa-devices/unbind',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='DeleteBindingDeviceResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def delete_mfa_device_async(self, request):
"""删除MFA设备
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)删除MFA设备。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param DeleteMfaDeviceRequest request
:return: DeleteMfaDeviceResponse
"""
return self.delete_mfa_device_with_http_info(request)
def delete_mfa_device_with_http_info(self, request):
"""删除MFA设备
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)删除MFA设备。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param DeleteMfaDeviceRequest request
:return: DeleteMfaDeviceResponse
"""
all_params = ['user_id', 'serial_number']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'user_id' in local_var_params:
query_params.append(('user_id', local_var_params['user_id']))
if 'serial_number' in local_var_params:
query_params.append(('serial_number', local_var_params['serial_number']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-MFA/virtual-mfa-devices',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='DeleteMfaDeviceResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_create_user_async(self, request):
"""管理员创建IAM用户
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)创建IAM用户。IAM用户首次登录时需要修改密码。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneCreateUserRequest request
:return: KeystoneCreateUserResponse
"""
return self.keystone_create_user_with_http_info(request)
def keystone_create_user_with_http_info(self, request):
"""管理员创建IAM用户
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)创建IAM用户。IAM用户首次登录时需要修改密码。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneCreateUserRequest request
:return: KeystoneCreateUserResponse
"""
all_params = ['keystone_create_user_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3/users',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneCreateUserResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_delete_user_async(self, request):
"""管理员删除IAM用户
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)删除指定IAM用户。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneDeleteUserRequest request
:return: KeystoneDeleteUserResponse
"""
return self.keystone_delete_user_with_http_info(request)
def keystone_delete_user_with_http_info(self, request):
"""管理员删除IAM用户
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)删除指定IAM用户。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneDeleteUserRequest request
:return: KeystoneDeleteUserResponse
"""
all_params = ['user_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'user_id' in local_var_params:
path_params['user_id'] = local_var_params['user_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/users/{user_id}',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneDeleteUserResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_list_groups_for_user_async(self, request):
"""查询IAM用户所属用户组
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询IAM用户所属用户组,或IAM用户查询自己所属用户组。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListGroupsForUserRequest request
:return: KeystoneListGroupsForUserResponse
"""
return self.keystone_list_groups_for_user_with_http_info(request)
def keystone_list_groups_for_user_with_http_info(self, request):
"""查询IAM用户所属用户组
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询IAM用户所属用户组,或IAM用户查询自己所属用户组。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListGroupsForUserRequest request
:return: KeystoneListGroupsForUserResponse
"""
all_params = ['user_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'user_id' in local_var_params:
path_params['user_id'] = local_var_params['user_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/users/{user_id}/groups',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneListGroupsForUserResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_list_users_async(self, request):
"""管理员查询IAM用户列表
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询IAM用户列表。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListUsersRequest request
:return: KeystoneListUsersResponse
"""
return self.keystone_list_users_with_http_info(request)
def keystone_list_users_with_http_info(self, request):
"""管理员查询IAM用户列表
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询IAM用户列表。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneListUsersRequest request
:return: KeystoneListUsersResponse
"""
all_params = ['domain_id', 'enabled', 'name', 'password_expires_at']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'domain_id' in local_var_params:
query_params.append(('domain_id', local_var_params['domain_id']))
if 'enabled' in local_var_params:
query_params.append(('enabled', local_var_params['enabled']))
if 'name' in local_var_params:
query_params.append(('name', local_var_params['name']))
if 'password_expires_at' in local_var_params:
query_params.append(('password_expires_at', local_var_params['password_expires_at']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/users',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneListUsersResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_show_user_async(self, request):
"""查询IAM用户详情
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询IAM用户详情,或IAM用户查询自己的用户详情。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneShowUserRequest request
:return: KeystoneShowUserResponse
"""
return self.keystone_show_user_with_http_info(request)
def keystone_show_user_with_http_info(self, request):
"""查询IAM用户详情
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询IAM用户详情,或IAM用户查询自己的用户详情。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneShowUserRequest request
:return: KeystoneShowUserResponse
"""
all_params = ['user_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'user_id' in local_var_params:
path_params['user_id'] = local_var_params['user_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/users/{user_id}',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneShowUserResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_update_user_by_admin_async(self, request):
"""管理员修改IAM用户信息
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)修改IAM用户信息。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneUpdateUserByAdminRequest request
:return: KeystoneUpdateUserByAdminResponse
"""
return self.keystone_update_user_by_admin_with_http_info(request)
def keystone_update_user_by_admin_with_http_info(self, request):
"""管理员修改IAM用户信息
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)修改IAM用户信息。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneUpdateUserByAdminRequest request
:return: KeystoneUpdateUserByAdminResponse
"""
all_params = ['user_id', 'keystone_update_user_by_admin_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'user_id' in local_var_params:
path_params['user_id'] = local_var_params['user_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3/users/{user_id}',
method='PATCH',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneUpdateUserByAdminResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_update_user_password_async(self, request):
"""修改IAM用户密码
该接口可以用于IAM用户修改自己的密码。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneUpdateUserPasswordRequest request
:return: KeystoneUpdateUserPasswordResponse
"""
return self.keystone_update_user_password_with_http_info(request)
def keystone_update_user_password_with_http_info(self, request):
"""修改IAM用户密码
该接口可以用于IAM用户修改自己的密码。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneUpdateUserPasswordRequest request
:return: KeystoneUpdateUserPasswordResponse
"""
all_params = ['user_id', 'keystone_update_user_password_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'user_id' in local_var_params:
path_params['user_id'] = local_var_params['user_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3/users/{user_id}/password',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneUpdateUserPasswordResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_user_login_protects_async(self, request):
"""查询IAM用户的登录保护状态信息列表
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询IAM用户的登录保护状态列表。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ListUserLoginProtectsRequest request
:return: ListUserLoginProtectsResponse
"""
return self.list_user_login_protects_with_http_info(request)
def list_user_login_protects_with_http_info(self, request):
"""查询IAM用户的登录保护状态信息列表
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询IAM用户的登录保护状态列表。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ListUserLoginProtectsRequest request
:return: ListUserLoginProtectsResponse
"""
all_params = []
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-USER/login-protects',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListUserLoginProtectsResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_user_mfa_devices_async(self, request):
"""该接口可以用于获取MFA设备。
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询IAM用户的MFA绑定信息列表。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ListUserMfaDevicesRequest request
:return: ListUserMfaDevicesResponse
"""
return self.list_user_mfa_devices_with_http_info(request)
def list_user_mfa_devices_with_http_info(self, request):
"""该接口可以用于获取MFA设备。
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询IAM用户的MFA绑定信息列表。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ListUserMfaDevicesRequest request
:return: ListUserMfaDevicesResponse
"""
all_params = []
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-MFA/virtual-mfa-devices',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListUserMfaDevicesResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_user_async(self, request):
"""查询IAM用户详情(推荐)
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询IAM用户详情,或IAM用户查询自己的详情。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ShowUserRequest request
:return: ShowUserResponse
"""
return self.show_user_with_http_info(request)
def show_user_with_http_info(self, request):
"""查询IAM用户详情(推荐)
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询IAM用户详情,或IAM用户查询自己的详情。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ShowUserRequest request
:return: ShowUserResponse
"""
all_params = ['user_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'user_id' in local_var_params:
path_params['user_id'] = local_var_params['user_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-USER/users/{user_id}',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowUserResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_user_login_protect_async(self, request):
"""查询指定IAM用户的登录保护状态信息
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询指定IAM用户的登录保护状态信息,或IAM用户查询自己的登录保护状态信息。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ShowUserLoginProtectRequest request
:return: ShowUserLoginProtectResponse
"""
return self.show_user_login_protect_with_http_info(request)
def show_user_login_protect_with_http_info(self, request):
"""查询指定IAM用户的登录保护状态信息
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询指定IAM用户的登录保护状态信息,或IAM用户查询自己的登录保护状态信息。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ShowUserLoginProtectRequest request
:return: ShowUserLoginProtectResponse
"""
all_params = ['user_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'user_id' in local_var_params:
path_params['user_id'] = local_var_params['user_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-USER/users/{user_id}/login-protect',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowUserLoginProtectResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_user_mfa_device_async(self, request):
"""查询指定IAM用户的MFA绑定信息
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询指定IAM用户的MFA绑定信息,或IAM用户查询自己的MFA绑定信息。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ShowUserMfaDeviceRequest request
:return: ShowUserMfaDeviceResponse
"""
return self.show_user_mfa_device_with_http_info(request)
def show_user_mfa_device_with_http_info(self, request):
"""查询指定IAM用户的MFA绑定信息
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)查询指定IAM用户的MFA绑定信息,或IAM用户查询自己的MFA绑定信息。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param ShowUserMfaDeviceRequest request
:return: ShowUserMfaDeviceResponse
"""
all_params = ['user_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'user_id' in local_var_params:
path_params['user_id'] = local_var_params['user_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-MFA/users/{user_id}/virtual-mfa-device',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowUserMfaDeviceResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def update_login_protect_async(self, request):
"""修改IAM用户登录保护状态信息
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)修改账号操作保护。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param UpdateLoginProtectRequest request
:return: UpdateLoginProtectResponse
"""
return self.update_login_protect_with_http_info(request)
def update_login_protect_with_http_info(self, request):
"""修改IAM用户登录保护状态信息
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)修改账号操作保护。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param UpdateLoginProtectRequest request
:return: UpdateLoginProtectResponse
"""
all_params = ['user_id', 'update_login_protect_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'user_id' in local_var_params:
path_params['user_id'] = local_var_params['user_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-USER/users/{user_id}/login-protect',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='UpdateLoginProtectResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def update_user_async(self, request):
"""管理员修改IAM用户信息(推荐)
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)修改IAM用户信息 。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param UpdateUserRequest request
:return: UpdateUserResponse
"""
return self.update_user_with_http_info(request)
def update_user_with_http_info(self, request):
"""管理员修改IAM用户信息(推荐)
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)修改IAM用户信息 。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param UpdateUserRequest request
:return: UpdateUserResponse
"""
all_params = ['user_id', 'update_user_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'user_id' in local_var_params:
path_params['user_id'] = local_var_params['user_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-USER/users/{user_id}',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='UpdateUserResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def update_user_information_async(self, request):
"""修改IAM用户信息(推荐)
该接口可以用于IAM用户修改自己的用户信息。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param UpdateUserInformationRequest request
:return: UpdateUserInformationResponse
"""
return self.update_user_information_with_http_info(request)
def update_user_information_with_http_info(self, request):
"""修改IAM用户信息(推荐)
该接口可以用于IAM用户修改自己的用户信息。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param UpdateUserInformationRequest request
:return: UpdateUserInformationResponse
"""
all_params = ['user_id', 'update_user_information_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'user_id' in local_var_params:
path_params['user_id'] = local_var_params['user_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3.0/OS-USER/users/{user_id}/info',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='UpdateUserInformationResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_create_agency_token_async(self, request):
"""获取委托Token
该接口可以用于获取委托方的token。 例如:A账号希望B账号管理自己的某些资源,所以A账号创建了委托给B账号,则A账号为委托方,B账号为被委托方。那么B账号可以通过该接口获取委托token。B账号仅能使用该token管理A账号的委托资源,不能管理自己账号中的资源。如果B账号需要管理自己账号中的资源,则需要获取自己的用户token。 token是系统颁发给用户的访问令牌,承载用户的身份、权限等信息。调用IAM以及其他云服务的接口时,可以使用本接口获取的token进行鉴权。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。如果使用全局区域的Endpoint调用,该token可以在所有区域使用;如果使用非全局区域的Endpoint调用,则该token仅在该区域生效,不能跨区域使用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。 > - token的有效期为24小时,建议进行缓存,避免频繁调用。
:param KeystoneCreateAgencyTokenRequest request
:return: KeystoneCreateAgencyTokenResponse
"""
return self.keystone_create_agency_token_with_http_info(request)
def keystone_create_agency_token_with_http_info(self, request):
"""获取委托Token
该接口可以用于获取委托方的token。 例如:A账号希望B账号管理自己的某些资源,所以A账号创建了委托给B账号,则A账号为委托方,B账号为被委托方。那么B账号可以通过该接口获取委托token。B账号仅能使用该token管理A账号的委托资源,不能管理自己账号中的资源。如果B账号需要管理自己账号中的资源,则需要获取自己的用户token。 token是系统颁发给用户的访问令牌,承载用户的身份、权限等信息。调用IAM以及其他云服务的接口时,可以使用本接口获取的token进行鉴权。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。如果使用全局区域的Endpoint调用,该token可以在所有区域使用;如果使用非全局区域的Endpoint调用,则该token仅在该区域生效,不能跨区域使用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。 > - token的有效期为24小时,建议进行缓存,避免频繁调用。
:param KeystoneCreateAgencyTokenRequest request
:return: KeystoneCreateAgencyTokenResponse
"""
all_params = ['keystone_create_agency_token_request_body', 'nocatalog']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'nocatalog' in local_var_params:
query_params.append(('nocatalog', local_var_params['nocatalog']))
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = ["X-Subject-Token"]
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3/auth/tokens',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneCreateAgencyTokenResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_create_user_token_by_password_async(self, request):
"""获取IAM用户Token(使用密码)
该接口可以用于通过用户名/密码的方式进行认证来获取IAM用户token。 token是系统颁发给IAM用户的访问令牌,承载用户的身份、权限等信息。调用IAM以及其他云服务的接口时,可以使用本接口获取的IAM用户token进行鉴权。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。如果使用全局区域的Endpoint调用,该token可以在所有区域使用;如果使用非全局区域的Endpoint调用,则该token仅在该区域生效,不能跨区域使用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。 > - token的有效期为24小时,建议进行缓存,避免频繁调用。 > - 通过Postman获取用户token示例请参见:[如何通过Postman获取用户token](https://support.huaweicloud.com/iam_faq/iam_01_034.html)。 > - 如果需要获取具有Security Administrator权限的token,请参见:[IAM 常见问题](https://support.huaweicloud.com/iam_faq/iam_01_0608.html)。
:param KeystoneCreateUserTokenByPasswordRequest request
:return: KeystoneCreateUserTokenByPasswordResponse
"""
return self.keystone_create_user_token_by_password_with_http_info(request)
def keystone_create_user_token_by_password_with_http_info(self, request):
"""获取IAM用户Token(使用密码)
该接口可以用于通过用户名/密码的方式进行认证来获取IAM用户token。 token是系统颁发给IAM用户的访问令牌,承载用户的身份、权限等信息。调用IAM以及其他云服务的接口时,可以使用本接口获取的IAM用户token进行鉴权。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。如果使用全局区域的Endpoint调用,该token可以在所有区域使用;如果使用非全局区域的Endpoint调用,则该token仅在该区域生效,不能跨区域使用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。 > - token的有效期为24小时,建议进行缓存,避免频繁调用。 > - 通过Postman获取用户token示例请参见:[如何通过Postman获取用户token](https://support.huaweicloud.com/iam_faq/iam_01_034.html)。 > - 如果需要获取具有Security Administrator权限的token,请参见:[IAM 常见问题](https://support.huaweicloud.com/iam_faq/iam_01_0608.html)。
:param KeystoneCreateUserTokenByPasswordRequest request
:return: KeystoneCreateUserTokenByPasswordResponse
"""
all_params = ['keystone_create_user_token_by_password_request_body', 'nocatalog']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'nocatalog' in local_var_params:
query_params.append(('nocatalog', local_var_params['nocatalog']))
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = ["X-Subject-Token"]
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3/auth/tokens',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneCreateUserTokenByPasswordResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
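# A hedged sketch (not part of this generated file) of the JSON body that the
# password-auth token call above posts to /v3/auth/tokens. The structure is the
# standard Keystone-style password authentication used by this API; every value
# below is a placeholder:
# {
#   "auth": {
#     "identity": {
#       "methods": ["password"],
#       "password": {
#         "user": {
#           "name": "<iam-user-name>",
#           "password": "<password>",
#           "domain": {"name": "<account-name>"}
#         }
#       }
#     },
#     "scope": {"project": {"name": "<project-name>"}}
#   }
# }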
def keystone_create_user_token_by_password_and_mfa_async(self, request):
"""获取IAM用户Token(使用密码+虚拟MFA)
该接口可以用于通过用户名/密码+虚拟MFA的方式进行认证,在IAM用户开启了的登录保护功能,并选择通过虚拟MFA验证时获取IAM用户token。 token是系统颁发给用户的访问令牌,承载用户的身份、权限等信息。调用IAM以及其他云服务的接口时,可以使用本接口获取的token进行鉴权。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。如果使用全局区域的Endpoint调用,该token可以在所有区域使用;如果使用非全局区域的Endpoint调用,则该token仅在该区域生效,不能跨区域使用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。 > - token的有效期为24小时,建议进行缓存,避免频繁调用。 > - 通过Postman获取用户token示例请参见:[如何通过Postman获取用户token](https://support.huaweicloud.com/iam_faq/iam_01_034.html)。 > - 如果需要获取具有Security Administrator权限的token,请参见:[IAM 常见问题](https://support.huaweicloud.com/iam_faq/iam_01_0608.html)。
:param KeystoneCreateUserTokenByPasswordAndMfaRequest request
:return: KeystoneCreateUserTokenByPasswordAndMfaResponse
"""
return self.keystone_create_user_token_by_password_and_mfa_with_http_info(request)
def keystone_create_user_token_by_password_and_mfa_with_http_info(self, request):
"""获取IAM用户Token(使用密码+虚拟MFA)
该接口可以用于通过用户名/密码+虚拟MFA的方式进行认证,在IAM用户开启了的登录保护功能,并选择通过虚拟MFA验证时获取IAM用户token。 token是系统颁发给用户的访问令牌,承载用户的身份、权限等信息。调用IAM以及其他云服务的接口时,可以使用本接口获取的token进行鉴权。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。如果使用全局区域的Endpoint调用,该token可以在所有区域使用;如果使用非全局区域的Endpoint调用,则该token仅在该区域生效,不能跨区域使用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。 > - token的有效期为24小时,建议进行缓存,避免频繁调用。 > - 通过Postman获取用户token示例请参见:[如何通过Postman获取用户token](https://support.huaweicloud.com/iam_faq/iam_01_034.html)。 > - 如果需要获取具有Security Administrator权限的token,请参见:[IAM 常见问题](https://support.huaweicloud.com/iam_faq/iam_01_0608.html)。
:param KeystoneCreateUserTokenByPasswordAndMfaRequest request
:return: KeystoneCreateUserTokenByPasswordAndMfaResponse
"""
all_params = ['keystone_create_user_token_by_password_and_mfa_request_body', 'nocatalog']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'nocatalog' in local_var_params:
query_params.append(('nocatalog', local_var_params['nocatalog']))
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = ["X-Subject-Token"]
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/v3/auth/tokens',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneCreateUserTokenByPasswordAndMfaResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def keystone_validate_token_async(self, request):
"""校验Token的有效性
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)校验本账号中IAM用户token的有效性,或IAM用户校验自己token的有效性。管理员仅能校验本账号中IAM用户token的有效性,不能校验其他账号中IAM用户token的有效性。如果被校验的token有效,则返回该token的详细信息。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneValidateTokenRequest request
:return: KeystoneValidateTokenResponse
"""
return self.keystone_validate_token_with_http_info(request)
def keystone_validate_token_with_http_info(self, request):
"""校验Token的有效性
该接口可以用于[管理员](https://support.huaweicloud.com/usermanual-iam/iam_01_0001.html)校验本账号中IAM用户token的有效性,或IAM用户校验自己token的有效性。管理员仅能校验本账号中IAM用户token的有效性,不能校验其他账号中IAM用户token的有效性。如果被校验的token有效,则返回该token的详细信息。 该接口可以使用全局区域的Endpoint和其他区域的Endpoint调用。IAM的Endpoint请参见:[地区和终端节点](https://developer.huaweicloud.com/endpoint?IAM)。
:param KeystoneValidateTokenRequest request
:return: KeystoneValidateTokenResponse
"""
all_params = ['x_subject_token', 'nocatalog']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'nocatalog' in local_var_params:
query_params.append(('nocatalog', local_var_params['nocatalog']))
header_params = {}
if 'x_subject_token' in local_var_params:
header_params['X-Subject-Token'] = local_var_params['x_subject_token']
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = ["X-Subject-Token"]
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v3/auth/tokens',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='KeystoneValidateTokenResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def call_api(self, resource_path, method, path_params=None, query_params=None, header_params=None, body=None,
post_params=None, response_type=None, response_headers=None, auth_settings=None,
collection_formats=None, request_type=None):
"""Makes the HTTP request and returns deserialized data.
:param resource_path: Path to method endpoint.
:param method: Method to call.
:param path_params: Path parameters in the url.
:param query_params: Query parameters in the url.
:param header_params: Header parameters to be
placed in the request header.
:param body: Request body.
:param post_params dict: Request post form parameters,
for `application/x-www-form-urlencoded`, `multipart/form-data`.
:param auth_settings list: Auth Settings names for the request.
:param response_type: Response data type.
:param response_headers: Header should be added to response data.
:param collection_formats: dict of collection formats for path, query,
header, and post parameters.
:param request_type: Request data type.
:return:
Return the response directly.
"""
return self.do_http_request(
method=method,
resource_path=resource_path,
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body,
post_params=post_params,
response_type=response_type,
response_headers=response_headers,
collection_formats=collection_formats,
request_type=request_type,
async_request=True)
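# Usage sketch for the async client defined above. This assumes the enclosing
# class is exposed as IamAsyncClient in huaweicloudsdkiam's v3 package and is
# built with the SDK's usual builder; those names and the credential values are
# assumptions for illustration, not part of this file.
# from huaweicloudsdkcore.auth.credentials import GlobalCredentials
# from huaweicloudsdkiam.v3 import IamAsyncClient, KeystoneListUsersRequest
#
# credentials = GlobalCredentials(ak="<access-key>", sk="<secret-key>")
# client = IamAsyncClient.new_builder() \
#     .with_credentials(credentials) \
#     .with_endpoint("https://iam.myhuaweicloud.com") \
#     .build()
# future = client.keystone_list_users_async(KeystoneListUsersRequest())
# print(future.result())  # the *_async methods return a future; result() blocks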
| avg_line_length: 36.081081 | max_line_length: 615 | alphanum_fraction: 0.666078 | (remaining qsc_* quality-signal columns omitted)
| hexsha: 5361af391e3956134437ab1546aef281fd4e5c4c | size: 580 | ext: py | lang: Python | repo_path: report_card_list.py | repo_name: Akshara2820/Python_Folder | repo_head_hexsha: 06782f88b45f907a4836e073c51f603bb19f9aa9 | licenses: ["MIT"] | star/issue/fork counts and dates: null
# Total of all marks: marks is a list of per-student mark lists.
marks = [[78, 76, 94, 86, 88], [91, 71, 98, 65, 76], [95, 45, 78, 52, 49]]
total_marks = 0
i = 0
while i < len(marks):
    j = 0
    while j < len(marks[i]):
        total_marks += marks[i][j]
        j += 1
    i += 1
print(total_marks)

# Average question: same traversal, but also count the marks seen so far and
# print the running integer average after each student's row.
marks = [[78, 76, 94, 86, 88], [91, 71, 98, 65, 76], [95, 45, 78, 52, 49]]
total_marks = 0
count = 0
i = 0
while i < len(marks):
    j = 0
    while j < len(marks[i]):
        total_marks += marks[i][j]
        j += 1
        count += 1
    print(total_marks // count)
    i += 1
print(total_marks)
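# The two exercises above collapse onto Python's builtins; an equivalent,
# idiomatic version (same numbers, same printed running averages):
marks = [[78, 76, 94, 86, 88], [91, 71, 98, 65, 76], [95, 45, 78, 52, 49]]
print(sum(sum(row) for row in marks))  # grand total: 1142
seen = 0
count = 0
for row in marks:
    seen += sum(row)
    count += len(row)
    print(seen // count)  # running integer averages: 84, 82, 76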
| avg_line_length: 13.488372 | max_line_length: 72 | alphanum_fraction: 0.510345 | (remaining qsc_* quality-signal columns omitted)
| hexsha: 725cf25f8e3abea87a69d78ce711819559b7fbb2 | size: 209,689 | ext: py | lang: Python | repo_path: autotest/ogr/ogr_wfs.py | repo_name: mihadyuk/gdal | repo_head_hexsha: d4627981715b82ff368547ef00ef26e0b9207048 | licenses: ["MIT"] | star/issue/fork counts and dates: null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: WFS driver testing.
# Author: Even Rouault <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2010-2013, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
sys.path.append( '../pymod' )
import gdaltest
import ogrtest
from osgeo import ogr
from osgeo import osr
from osgeo import gdal
import webserver
###############################################################################
# Test underlying OGR drivers
#
def ogr_wfs_init():
gdaltest.wfs_drv = None
try:
gdaltest.wfs_drv = ogr.GetDriverByName('WFS')
except:
pass
if gdaltest.wfs_drv is None:
return 'skip'
gdaltest.geoserver_wfs = None
gdaltest.deegree_wfs = None
gdaltest.ionic_wfs = None
try:
gml_ds = ogr.Open( 'data/ionic_wfs.gml' )
except:
gml_ds = None
if gml_ds is None:
gdaltest.wfs_drv = None
if gdal.GetLastErrorMsg().find('Xerces') != -1:
return 'skip'
else:
gdaltest.post_reason( 'failed to open test file.' )
return 'skip'
return 'success'
###############################################################################
# Test reading a MapServer WFS server
def ogr_wfs_mapserver():
if gdaltest.wfs_drv is None:
return 'skip'
if gdaltest.gdalurlopen('http://www2.dmsolutions.ca/cgi-bin/mswfs_gmap') is None:
print('cannot open URL')
return 'skip'
ds = ogr.Open('WFS:http://www2.dmsolutions.ca/cgi-bin/mswfs_gmap')
if ds is None:
gdaltest.post_reason('did not manage to open WFS datastore')
return 'skip'
if ds.GetLayerCount() != 2:
gdaltest.post_reason('did not get expected layer count')
print(ds.GetLayerCount())
return 'fail'
lyr = ds.GetLayer(0)
if lyr.GetName() != 'park':
gdaltest.post_reason('did not get expected layer name')
print(lyr.GetName())
return 'fail'
sr = lyr.GetSpatialRef()
sr2 = osr.SpatialReference()
sr2.ImportFromEPSG(42304)
if not sr.IsSame(sr2):
gdaltest.post_reason('did not get expected SRS')
print(sr)
return 'fail'
feat_count = lyr.GetFeatureCount()
if feat_count != 46:
gdaltest.post_reason('did not get expected feature count')
print(feat_count)
return 'fail'
feat = lyr.GetNextFeature()
geom = feat.GetGeometryRef()
geom_wkt = geom.ExportToWkt()
if geom_wkt.find("POLYGON ((389366.84375 3791519.75") == -1:
gdaltest.post_reason('did not get expected feature')
feat.DumpReadable()
return 'fail'
return 'success'
###############################################################################
# Test reading a GeoServer WFS server
def ogr_wfs_geoserver():
if gdaltest.wfs_drv is None:
return 'skip'
if gdaltest.gdalurlopen('http://demo.opengeo.org/geoserver/wfs?TYPENAME=za:za_points&SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType') is None:
print('cannot open URL')
gdaltest.geoserver_wfs = False
return 'skip'
gdaltest.geoserver_wfs = True
ds = ogr.Open('WFS:http://demo.opengeo.org/geoserver/wfs?TYPENAME=za:za_points')
if ds is None:
gdaltest.post_reason('did not manage to open WFS datastore')
return 'fail'
if ds.GetLayerCount() != 1:
gdaltest.post_reason('did not get expected layer count')
print(ds.GetLayerCount())
return 'fail'
lyr = ds.GetLayer(0)
if lyr.GetName() != 'za:za_points':
gdaltest.post_reason('did not get expected layer name')
print(lyr.GetName())
return 'fail'
sr = lyr.GetSpatialRef()
sr2 = osr.SpatialReference()
sr2.ImportFromEPSG(4326)
if not sr.IsSame(sr2):
gdaltest.post_reason('did not get expected SRS')
print(sr)
return 'fail'
feat_count = lyr.GetFeatureCount()
if feat_count < 14000:
if gdal.GetLastErrorMsg().find('The connection attempt failed') != -1:
print('server probably in a broken state')
# Disable it for wfs-t test
gdaltest.geoserver_wfs = False
return 'skip'
gdaltest.post_reason('did not get expected feature count')
print(feat_count)
return 'fail'
if not lyr.TestCapability(ogr.OLCFastFeatureCount):
gdaltest.post_reason('did not get OLCFastFeatureCount')
return 'fail'
ds = ogr.Open('WFS:http://demo.opengeo.org/geoserver/wfs?TYPENAME=tiger:poi&MAXFEATURES=10&VERSION=1.1.0')
if ds is None:
print('server perhaps overloaded')
return 'skip'
lyr = ds.GetLayer(0)
gdal.ErrorReset()
feat = lyr.GetNextFeature()
# This error message is generally the sign of a server in a broken state
if feat is None and gdal.GetLastErrorMsg().find('<ows:ExceptionText>org.geoserver.platform.ServiceException') != -1:
print('server probably in a broken state')
# Disable it for wfs-t test
gdaltest.geoserver_wfs = False
return 'skip'
if feat.GetField('NAME') != 'museam' or \
ogrtest.check_feature_geometry(feat,'POINT (-74.0104611 40.70758763)',
max_error = 0.000001 ) != 0:
gdaltest.post_reason('did not get expected feature (1)')
feat.DumpReadable()
return 'fail'
# Same with VERSION=1.0.0
ds = ogr.Open('WFS:http://demo.opengeo.org/geoserver/wfs?TYPENAME=tiger:poi&MAXFEATURES=10&VERSION=1.0.0')
if ds is None:
print('server perhaps overloaded')
return 'skip'
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
if feat.GetField('NAME') != 'museam' or \
ogrtest.check_feature_geometry(feat,'POINT (-74.0104611 40.70758763)',
max_error = 0.000001 ) != 0:
gdaltest.post_reason('did not get expected feature (2)')
feat.DumpReadable()
return 'fail'
# Test attribute filter
ds = ogr.Open("WFS:http://demo.opengeo.org/geoserver/wfs?TYPENAME=tiger:poi")
if ds is None:
print('server perhaps overloaded')
return 'skip'
lyr = ds.GetLayer(0)
lyr.SetAttributeFilter("MAINPAGE is not null and NAME >= 'a' and NAME LIKE 'mu%%eam'")
feat_count = lyr.GetFeatureCount()
if feat_count != 1:
gdaltest.post_reason('did not get expected feature count after SetAttributeFilter (1)')
print(feat_count)
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetField('gml_id') != 'poi.1':
gdaltest.post_reason('did not get expected feature (3)')
feat.DumpReadable()
return 'fail'
if False:
# This GeoServer version doesn't understand <GmlObjectId>
lyr.SetAttributeFilter("gml_id = 'poi.1'")
feat_count = lyr.GetFeatureCount()
if feat_count != 1:
gdaltest.post_reason('did not get expected feature count after SetAttributeFilter (2)')
print(feat_count)
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetField('gml_id') != 'poi.1':
gdaltest.post_reason('did not get expected feature (4)')
feat.DumpReadable()
return 'fail'
return 'success'
###############################################################################
# Test reading a GeoServer WFS server with OUTPUTFORMAT=json
def ogr_wfs_geoserver_json():
if gdaltest.wfs_drv is None:
return 'skip'
if gdaltest.geoserver_wfs != True:
return 'skip'
ds = ogr.Open('WFS:http://demo.opengeo.org/geoserver/wfs?TYPENAME=za:za_points&MAXFEATURES=10&VERSION=1.1.0&OUTPUTFORMAT=json')
if ds is None:
gdaltest.post_reason('did not manage to open WFS datastore')
return 'fail'
if ds.GetLayerCount() != 1:
gdaltest.post_reason('did not get expected layer count')
print(ds.GetLayerCount())
return 'fail'
lyr = ds.GetLayer(0)
if lyr.GetName() != 'za:za_points':
gdaltest.post_reason('did not get expected layer name')
print(lyr.GetName())
return 'fail'
feat_count = lyr.GetFeatureCount()
if feat_count != 10:
gdaltest.post_reason('did not get expected feature count')
print(feat_count)
return 'fail'
if not lyr.TestCapability(ogr.OLCFastFeatureCount):
gdaltest.post_reason('did not get OLCFastFeatureCount')
return 'fail'
feat = lyr.GetNextFeature()
#if feat.GetField('name') != 'Alexander Bay' or \
if ogrtest.check_feature_geometry(feat,'POINT (16.4827778 -28.5947222)',
max_error = 0.000000001 ) != 0:
gdaltest.post_reason('did not get expected feature')
feat.DumpReadable()
return 'fail'
return 'success'
###############################################################################
# Test reading a GeoServer WFS server with OUTPUTFORMAT=SHAPE-ZIP
def ogr_wfs_geoserver_shapezip():
if gdaltest.wfs_drv is None:
return 'skip'
if gdaltest.geoserver_wfs != True:
return 'skip'
ds = ogr.Open('WFS:http://demo.opengeo.org/geoserver/wfs?TYPENAME=za:za_points&MAXFEATURES=10&VERSION=1.1.0&OUTPUTFORMAT=SHAPE-ZIP')
if ds is None:
gdaltest.post_reason('did not manage to open WFS datastore')
return 'fail'
if ds.GetLayerCount() != 1:
gdaltest.post_reason('did not get expected layer count')
print(ds.GetLayerCount())
return 'fail'
lyr = ds.GetLayer(0)
if lyr.GetName() != 'za:za_points':
gdaltest.post_reason('did not get expected layer name')
print(lyr.GetName())
return 'fail'
feat_count = lyr.GetFeatureCount()
if feat_count != 10:
gdaltest.post_reason('did not get expected feature count')
print(feat_count)
return 'fail'
if not lyr.TestCapability(ogr.OLCFastFeatureCount):
gdaltest.post_reason('did not get OLCFastFeatureCount')
return 'fail'
feat = lyr.GetNextFeature()
#if feat.GetField('name') != 'Alexander Bay' or \
if ogrtest.check_feature_geometry(feat,'POINT (16.4827778 -28.5947222)',
max_error = 0.000000001 ) != 0:
gdaltest.post_reason('did not get expected feature')
feat.DumpReadable()
return 'fail'
return 'success'
###############################################################################
# Test WFS paging
def ogr_wfs_geoserver_paging():
if gdaltest.wfs_drv is None:
return 'skip'
if gdaltest.geoserver_wfs != True:
return 'skip'
ds = ogr.Open('WFS:http://demo.opengeo.org/geoserver/wfs?TYPENAME=og:bugsites&VERSION=1.1.0')
lyr = ds.GetLayer(0)
feature_count_ref = lyr.GetFeatureCount()
page_size = int(feature_count_ref / 3) + 1
ds = None
# Test with WFS 1.0.0
gdal.SetConfigOption('OGR_WFS_PAGING_ALLOWED', 'ON')
gdal.SetConfigOption('OGR_WFS_PAGE_SIZE', '%d' % page_size)
ds = ogr.Open('WFS:http://demo.opengeo.org/geoserver/wfs?TYPENAME=og:bugsites&VERSION=1.0.0')
gdal.SetConfigOption('OGR_WFS_PAGING_ALLOWED', None)
gdal.SetConfigOption('OGR_WFS_PAGE_SIZE', None)
if ds is None:
gdaltest.post_reason('did not manage to open WFS datastore')
return 'fail'
lyr = ds.GetLayer(0)
feature_count_wfs100 = lyr.GetFeatureCount()
ds = None
if feature_count_wfs100 != feature_count_ref:
gdaltest.post_reason('fail')
print(feature_count_wfs100)
print(feature_count_ref)
return 'fail'
# Test with WFS 1.1.0
gdal.SetConfigOption('OGR_WFS_PAGING_ALLOWED', 'ON')
gdal.SetConfigOption('OGR_WFS_PAGE_SIZE', '%d' % page_size)
ds = ogr.Open('WFS:http://demo.opengeo.org/geoserver/wfs?TYPENAME=og:bugsites&VERSION=1.1.0')
gdal.SetConfigOption('OGR_WFS_PAGING_ALLOWED', None)
gdal.SetConfigOption('OGR_WFS_PAGE_SIZE', None)
if ds is None:
gdaltest.post_reason('did not manage to open WFS datastore')
return 'fail'
lyr = ds.GetLayer(0)
feature_count_wfs110 = lyr.GetFeatureCount()
feature_count_wfs110_at_hand = 0
lyr.ResetReading()
feat = lyr.GetNextFeature()
while feat is not None:
feature_count_wfs110_at_hand = feature_count_wfs110_at_hand + 1
feat = lyr.GetNextFeature()
ds = None
if feature_count_wfs110 != feature_count_ref:
gdaltest.post_reason('fail')
print(feature_count_wfs110)
print(feature_count_ref)
return 'fail'
if feature_count_wfs110_at_hand != feature_count_ref:
gdaltest.post_reason('fail')
print(feature_count_wfs110_at_hand)
print(feature_count_ref)
return 'fail'
return 'success'
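###############################################################################
# A minimal standalone sketch of the paging pattern exercised in the test
# above; the function name and page size here are illustrative, everything
# else mirrors what the test does.
def ogr_wfs_paging_sketch():
    # Ask the WFS driver to page through results rather than fetch all at once.
    gdal.SetConfigOption('OGR_WFS_PAGING_ALLOWED', 'ON')
    gdal.SetConfigOption('OGR_WFS_PAGE_SIZE', '100')  # features per request
    ds = ogr.Open('WFS:http://demo.opengeo.org/geoserver/wfs?TYPENAME=og:bugsites')
    # Reset the options so later opens are unaffected, as the test does.
    gdal.SetConfigOption('OGR_WFS_PAGING_ALLOWED', None)
    gdal.SetConfigOption('OGR_WFS_PAGE_SIZE', None)
    if ds is None:
        return 'skip'
    # The driver fetches successive pages transparently while iterating.
    lyr = ds.GetLayer(0)
    print(lyr.GetFeatureCount())
    return 'success'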
###############################################################################
# Test reading a Deegree WFS server
def ogr_wfs_deegree():
if gdaltest.wfs_drv is None:
return 'skip'
if gdaltest.gdalurlopen('http://demo.deegree.org:80/utah-workspace') is None:
gdaltest.deegree_wfs = False
print('cannot open URL')
return 'skip'
gdaltest.deegree_wfs = True
ds = ogr.Open("WFS:http://demo.deegree.org:80/utah-workspace/services?ACCEPTVERSIONS=1.1.0&MAXFEATURES=10")
if ds is None:
if gdal.GetLastErrorMsg().find('Error returned by server') < 0:
gdaltest.deegree_wfs = False
return 'skip'
gdaltest.post_reason('did not manage to open WFS datastore')
return 'fail'
lyr = ds.GetLayerByName('app:SGID024_Springs')
if lyr.GetName() != 'app:SGID024_Springs':
gdaltest.post_reason('did not get expected layer name')
print(lyr.GetName())
return 'fail'
sr = lyr.GetSpatialRef()
sr2 = osr.SpatialReference()
sr2.ImportFromEPSG(26912)
if not sr.IsSame(sr2):
gdaltest.post_reason('did not get expected SRS')
print(sr)
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetField('OBJECTID') != 1 or \
ogrtest.check_feature_geometry(feat,'POINT (558750.703 4402882.05)',
max_error = 0.000000001 ) != 0:
gdaltest.post_reason('did not get expected feature')
feat.DumpReadable()
return 'fail'
# Test attribute filter
ds = ogr.Open("WFS:http://demo.deegree.org:80/utah-workspace/services?ACCEPTVERSIONS=1.1.0")
lyr = ds.GetLayerByName('app:SGID024_Springs')
lyr.SetAttributeFilter('OBJECTID = 9 or OBJECTID = 100 or (OBJECTID >= 20 and OBJECTID <= 30 and OBJECTID != 27)')
feat_count = lyr.GetFeatureCount()
if feat_count != 12:
if gdal.GetLastErrorMsg().find('XML parsing of GML file failed') < 0 and \
gdal.GetLastErrorMsg().find('No suitable driver found') < 0:
gdaltest.post_reason('did not get expected feature count after SetAttributeFilter')
print(feat_count)
return 'fail'
# Test attribute filter with gml_id
#lyr.SetAttributeFilter("gml_id = 'SGID024_Springs30' or gml_id = 'SGID024_Springs100'")
#feat_count = lyr.GetFeatureCount()
#if feat_count != 2:
# gdaltest.post_reason('did not get expected feature count after SetAttributeFilter (2)')
# print(feat_count)
# return 'fail'
return 'success'
###############################################################################
# Run test_ogrsf
def ogr_wfs_test_ogrsf():
if gdaltest.wfs_drv is None:
return 'skip'
if not gdaltest.deegree_wfs:
return 'skip'
import test_cli_utilities
if test_cli_utilities.get_test_ogrsf_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_test_ogrsf_path() + ' -ro "WFS:http://demo.deegree.org:80/utah-workspace/services?ACCEPTVERSIONS=1.1.0&MAXFEATURES=10" app:SGID024_Springs')
if ret.find('INFO') == -1 or ret.find('ERROR') != -1:
print(ret)
return 'fail'
return 'success'
###############################################################################
# Test reading a local fake WFS server
def ogr_wfs_fake_wfs_server():
if gdaltest.wfs_drv is None:
return 'skip'
(process, port) = webserver.launch()
if port == 0:
return 'skip'
gdal.SetConfigOption('OGR_WFS_LOAD_MULTIPLE_LAYER_DEFN', 'NO')
ds = ogr.Open("WFS:http://127.0.0.1:%d/fakewfs" % port)
gdal.SetConfigOption('OGR_WFS_LOAD_MULTIPLE_LAYER_DEFN', None)
if ds is None:
gdaltest.post_reason('did not manage to open WFS datastore')
webserver.server_stop(process, port)
return 'fail'
lyr = ds.GetLayerByName('rijkswegen')
if lyr.GetName() != 'rijkswegen':
gdaltest.post_reason('did not get expected layer name')
print(lyr.GetName())
webserver.server_stop(process, port)
return 'fail'
sr = lyr.GetSpatialRef()
sr2 = osr.SpatialReference()
sr2.ImportFromEPSG(28992)
if not sr.IsSame(sr2):
gdaltest.post_reason('did not get expected SRS')
print(sr)
webserver.server_stop(process, port)
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetField('MPLength') != '33513.' or \
ogrtest.check_feature_geometry(feat,'MULTICURVE ((154898.65286 568054.62753,160108.36082 566076.78094,164239.254332 563024.70188,170523.31535 561231.219583,172676.42256 559253.37299,175912.80562 557459.89069,180043.699132 553508.779495,183294.491306 552250.182732))',
max_error = 0.00001 ) != 0:
gdaltest.post_reason('did not get expected feature')
feat.DumpReadable()
webserver.server_stop(process, port)
return 'fail'
webserver.server_stop(process, port)
return 'success'
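# --- Illustrative sketch (not used by the tests): the test above must repeat
# webserver.server_stop() on every failure path. A try/finally wrapper around
# the same webserver helper module keeps the teardown in one place:
def run_against_fake_wfs(check_func):
    (process, port) = webserver.launch()
    if port == 0:
        return 'skip'
    try:
        # check_func receives the datasource name and returns 'success'/'fail'
        return check_func('WFS:http://127.0.0.1:%d/fakewfs' % port)
    finally:
        webserver.server_stop(process, port)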
###############################################################################
# Test CreateFeature() / UpdateFeature() / DeleteFeature() (WFS-T)
def ogr_wfs_geoserver_wfst():
if gdaltest.wfs_drv is None:
return 'skip'
if not gdaltest.geoserver_wfs:
return 'skip'
ds = ogr.Open('WFS:http://demo.opengeo.org/geoserver/wfs?VERSION=1.1.0', update = 1)
if ds is None:
return 'fail'
lyr = ds.GetLayerByName('za:za_points')
geom = ogr.CreateGeometryFromWkt('POINT(0 89.5)')
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetGeometry(geom)
#feat.SetField('name', 'name_set_by_ogr_wfs_8_test')
feat.SetField('type', 'type_set_by_ogr_wfs_8_test')
if lyr.CreateFeature(feat) != 0:
# Likely a bug in the current GeoServer version?
if gdal.GetLastErrorMsg().find("No such property 'typeName'") >= 0:
return 'skip'
gdaltest.post_reason('cannot create feature')
return 'fail'
print('Feature %d created!' % feat.GetFID())
feat.SetField('type', 'type_modified_by_ogr_wfs_8_test')
if lyr.SetFeature(feat) != 0:
gdaltest.post_reason('cannot update feature')
return 'fail'
print('Feature %d updated!' % feat.GetFID())
if lyr.DeleteFeature(feat.GetFID()) != 0:
gdaltest.post_reason('could not delete feature')
return 'fail'
print('Feature %d deleted!' % feat.GetFID())
# Test transactions
if lyr.StartTransaction() != 0:
gdaltest.post_reason('StartTransaction() failed')
return 'fail'
geom = ogr.CreateGeometryFromWkt('POINT(0 89.5)')
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetGeometry(geom)
#feat.SetField('name', 'name_set_by_ogr_wfs_8_test')
feat.SetField('type', 'type_set_by_ogr_wfs_8_test')
if lyr.CreateFeature(feat) != 0:
gdaltest.post_reason('cannot create feature')
return 'fail'
geom = ogr.CreateGeometryFromWkt('POINT(0 89.5)')
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetGeometry(geom)
#feat.SetField('name', 'name_set_by_ogr_wfs_8_test_2')
feat.SetField('type', 'type_set_by_ogr_wfs_8_test_2')
if lyr.CreateFeature(feat) != 0:
gdaltest.post_reason('cannot create feature')
return 'fail'
if lyr.CommitTransaction() != 0:
gdaltest.post_reason('CommitTransaction() failed')
return 'fail'
# Retrieve inserted features
print('Retrieving created features gml:id')
sql_lyr = ds.ExecuteSQL("SELECT _LAST_INSERTED_FIDS_ FROM za:za_points");
feat = sql_lyr.GetNextFeature()
while feat is not None:
gml_id = feat.GetFieldAsString(0)
print('Feature %s has been created in transaction!' % gml_id)
feat = sql_lyr.GetNextFeature()
feat = None
count = sql_lyr.GetFeatureCount()
ds.ReleaseResultSet(sql_lyr)
if count != 2:
gdaltest.post_reason('did not get expected feature count')
return 'fail'
# Delete a bunch of features
print('Deleting created features')
sql_lyr = ds.ExecuteSQL("DELETE FROM za:za_points WHERE type = 'type_set_by_ogr_wfs_8_test' OR type = 'type_set_by_ogr_wfs_8_test_2'")
ds.ReleaseResultSet(sql_lyr)
return 'success'
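# --- Illustrative sketch (not used by the tests) of the WFS-T flow exercised
# above: StartTransaction() batches the CreateFeature() calls into a single
# WFS Transaction request sent by CommitTransaction(), after which the
# special _LAST_INSERTED_FIDS_ pseudo-column exposes the server-assigned
# gml:ids of the inserted features.
def insert_features_in_transaction(ds, lyr, wkt_list):
    if lyr.StartTransaction() != 0:
        return None
    for wkt in wkt_list:
        feat = ogr.Feature(lyr.GetLayerDefn())
        feat.SetGeometry(ogr.CreateGeometryFromWkt(wkt))
        if lyr.CreateFeature(feat) != 0:
            return None
    if lyr.CommitTransaction() != 0:
        return None
    gml_ids = []
    sql_lyr = ds.ExecuteSQL('SELECT _LAST_INSERTED_FIDS_ FROM %s' % lyr.GetName())
    feat = sql_lyr.GetNextFeature()
    while feat is not None:
        gml_ids.append(feat.GetFieldAsString(0))
        feat = sql_lyr.GetNextFeature()
    ds.ReleaseResultSet(sql_lyr)
    return gml_ids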
###############################################################################
# Test CreateFeature() / UpdateFeature() / DeleteFeature() with expected
# failure due to server not allowing insert & delete
def ogr_wfs_deegree_wfst():
if gdaltest.wfs_drv is None:
return 'skip'
if gdaltest.gdalurlopen('http://testing.deegree.org/deegree-wfs/services') is None:
print('cannot open URL')
return 'skip'
ds = ogr.Open('WFS:http://testing.deegree.org/deegree-wfs/services', update = 1)
if ds is None:
return 'fail'
lyr = ds.GetLayerByName('app:CountyBoundaries_edited')
geom = ogr.CreateGeometryFromWkt('POINT(2 49)')
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetGeometry(geom)
feat.SetField('name', 'nameSetByOGR')
feat.SetField('fips', '10')
feat.SetField('feature_id', '123456')
feat.SetField('OBJECTID', '7890123')
feat.SetField('shape_area', 12.34)
feat.SetField('shape_len', 56.78)
ret = lyr.CreateFeature(feat)
if ret != 0:
print('expected fail on CreateFeature')
ret = lyr.DeleteFeature(1)
if ret != 0:
print('expected fail on DeleteFeature')
feat = lyr.GetFeature(10)
ret = lyr.SetFeature(feat)
if ret != 0:
print('expected fail on SetFeature')
return 'success'
###############################################################################
# Test CreateFeature() / UpdateFeature() / DeleteFeature() on a WFS 1.0.0 server
def ogr_wfs_ionic_wfst():
if gdaltest.wfs_drv is None:
return 'skip'
if gdaltest.gdalurlopen('http://webservices.ionicsoft.com/ionicweb/wfs/BOSTON_ORA') is None:
print('cannot open URL')
gdaltest.ionic_wfs = False
return 'skip'
gdaltest.ionic_wfs = True
ds = ogr.Open('WFS:http://webservices.ionicsoft.com/ionicweb/wfs/BOSTON_ORA', update = 1)
if ds is None:
if gdal.GetLastErrorMsg().find('HTTP error code : 403') != -1:
gdaltest.ionic_wfs = False
return 'skip'
return 'fail'
lyr = ds.GetLayerByName('wfs:BUSINESS')
geom = ogr.CreateGeometryFromWkt('POINT(234000 890000)')
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetGeometry(geom)
feat.SetField('NAME', 'nameSetByOGR')
feat.SetField('TOTAL_EMPLOYEES', '10')
ret = lyr.CreateFeature(feat)
if ret != 0:
print('fail on CreateFeature')
return 'fail'
gmlid = feat.GetField('gml_id')
ret = lyr.SetFeature(feat)
if ret != 0:
print('fail on SetFeature')
return 'fail'
ds.ExecuteSQL("DELETE FROM wfs:BUSINESS WHERE gml_id = '%s'" % gmlid)
return 'success'
###############################################################################
# Test ExecuteSQL() where SQL should be turned into PROPERTYNAME and FILTER parameters
def ogr_wfs_ionic_sql():
if gdaltest.wfs_drv is None:
return 'skip'
if not gdaltest.ionic_wfs:
return 'skip'
ds = ogr.Open('WFS:http://webservices.ionicsoft.com/ionicweb/wfs/BOSTON_ORA')
if ds is None:
return 'fail'
lyr = ds.ExecuteSQL("SELECT name FROM \"wfs:BUSINESS\" WHERE total_employees = 105")
count = lyr.GetFeatureCount()
ds.ReleaseResultSet(lyr)
if count != 1:
return 'fail'
return 'success'
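# For illustration (hand-written, not captured from the driver): the SELECT
# above is expected to be turned into a single GetFeature request of roughly
# this shape, rather than being filtered client-side:
#
#   .../wfs/BOSTON_ORA?SERVICE=WFS&REQUEST=GetFeature&TYPENAME=wfs:BUSINESS
#       &PROPERTYNAME=NAME
#       &FILTER=<Filter><PropertyIsEqualTo><PropertyName>TOTAL_EMPLOYEES</PropertyName>
#               <Literal>105</Literal></PropertyIsEqualTo></Filter>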
###############################################################################
# Test opening a datasource from an XML description file
# The following test should issue no WFS HTTP requests
def ogr_wfs_xmldescriptionfile():
if gdaltest.wfs_drv is None:
return 'skip'
ds = ogr.Open('data/testwfs.xml')
lyr = ds.GetLayer(0)
feature_defn = lyr.GetLayerDefn()
index = feature_defn.GetFieldIndex('name')
sr = lyr.GetSpatialRef()
if index != 1:
print(index)
return 'fail'
wkt = sr.ExportToWkt()
if wkt.find('WGS 84') == -1:
print(wkt)
return 'fail'
layermetadata = ds.GetLayerByName('WFSLayerMetadata')
count_layers = layermetadata.GetFeatureCount()
if count_layers != ds.GetLayerCount():
gdaltest.post_reason('count_layers != ds.GetLayerCount()')
print(count_layers)
print(ds.GetLayerCount())
return 'fail'
getcapabilitieslayer = ds.GetLayerByName('WFSGetCapabilities')
getcapabilitieslayer_feat = getcapabilitieslayer.GetNextFeature()
getcapabilitieslayer_content = getcapabilitieslayer_feat.GetFieldAsString(0)
if getcapabilitieslayer_content.find('<WFS_Capabilities') != 0:
gdaltest.post_reason('did not get expected result')
print(getcapabilitieslayer_content)
return 'fail'
ds = None
return 'success'
###############################################################################
# Test opening a datasource from an XML description file that contains just the URL
def ogr_wfs_xmldescriptionfile_to_be_updated():
if gdaltest.wfs_drv is None:
return 'skip'
if not gdaltest.geoserver_wfs:
return 'skip'
f = open('tmp/ogr_wfs_xmldescriptionfile_to_be_updated.xml', 'wt')
f.write('<OGRWFSDataSource>\n')
f.write('<URL>http://demo.opengeo.org/geoserver/wfs</URL>\n')
f.write('</OGRWFSDataSource>\n')
f.close()
# Should only emit GetCapabilities and serialize it
ds = ogr.Open('tmp/ogr_wfs_xmldescriptionfile_to_be_updated.xml')
if ds is None:
return 'fail'
ds = None
f = open('tmp/ogr_wfs_xmldescriptionfile_to_be_updated.xml', 'rt')
content = f.read()
if content.find('WFS_Capabilities') == -1:
print(content)
gdaltest.post_reason('XML description file was not filled as expected')
return 'fail'
if content.find('<OGRWFSLayer') != -1:
print(content)
gdaltest.post_reason('XML description file was not filled as expected')
return 'fail'
f.close()
# Should emit DescribeFeatureType and serialize its result
ds = ogr.Open('tmp/ogr_wfs_xmldescriptionfile_to_be_updated.xml')
if ds is None:
return 'fail'
ds.GetLayerByName('za:za_points').GetLayerDefn()
ds = None
f = open('tmp/ogr_wfs_xmldescriptionfile_to_be_updated.xml', 'rt')
content = f.read()
if content.find('<OGRWFSLayer name="za:za_points">') == -1:
print(content)
gdaltest.post_reason('XML description file was not filled as expected')
return 'fail'
f.close()
os.unlink('tmp/ogr_wfs_xmldescriptionfile_to_be_updated.xml')
return 'success'
###############################################################################
# Test opening a datasource directly from a GetCapabilities response XML file
# The following test should issue no WFS HTTP requests
def ogr_wfs_getcapabilitiesfile():
if gdaltest.wfs_drv is None:
return 'skip'
ds = ogr.Open('data/getcapabilities_wfs.xml')
if ds is None:
return 'fail'
ds = None
return 'success'
###############################################################################
# Test opening a datastore that only supports GML 3.2.1 output
def ogr_wfs_deegree_gml321():
if gdaltest.wfs_drv is None:
return 'skip'
ds = ogr.Open('WFS:http://demo.deegree.org:80/inspire-workspace/services?ACCEPTVERSIONS=1.1.0&MAXFEATURES=10')
if ds is None:
if gdaltest.gdalurlopen('http://demo.deegree.org:80/inspire-workspace/services?ACCEPTVERSIONS=1.1.0') is None:
print('cannot open URL')
return 'skip'
if gdal.GetLastErrorMsg().find("Unable to determine the subcontroller for request type 'GetCapabilities' and service type 'WFS'") != -1:
return 'skip'
return 'fail'
lyr = ds.GetLayerByName("ad:Address")
count = lyr.GetFeatureCount()
if count != 10:
print(count)
return 'fail'
return 'success'
###############################################################################
# Test WFS 2.0.0 support
def ogr_wfs_deegree_wfs200():
if gdaltest.wfs_drv is None:
return 'skip'
ds = ogr.Open('WFS:http://demo.deegree.org:80/utah-workspace/services?ACCEPTVERSIONS=2.0.0')
if ds is None:
if gdaltest.gdalurlopen('http://demo.deegree.org:80/utah-workspace/services?ACCEPTVERSIONS=2.0.0') is None:
print('cannot open URL')
return 'skip'
return 'fail'
lyr = ds.GetLayerByName("app:SGID024_Municipalities2004_edited")
lyr.SetAttributeFilter('OBJECTID = 5')
count = lyr.GetFeatureCount()
if count != 1:
if gdal.GetLastErrorMsg().find('HTTP error code : 500') < 0:
gdaltest.post_reason("OBJECTID = 5 filter failed")
print(count)
return 'fail'
else:
feat = lyr.GetNextFeature()
if feat.GetFieldAsInteger('OBJECTID') != 5:
gdaltest.post_reason("OBJECTID = 5 filter failed")
feat.DumpReadable()
return 'fail'
lyr.SetAttributeFilter("gml_id = 'SGID024_MUNICIPALITIES2004_EDITED_5'")
count = lyr.GetFeatureCount()
if count != 1:
# FIXME! Avoid failing ogr_wfs_deegree_wfs200: the server is likely buggy, since this worked before and the WFS client code hasn't changed
print("gml_id = 'SGID024_MUNICIPALITIES2004_EDITED_5' filter failed")
#gdaltest.post_reason("gml_id = 'SGID024_MUNICIPALITIES2004_EDITED_5' filter failed")
#print(count)
#return 'fail'
else:
feat = lyr.GetNextFeature()
if feat.GetFieldAsInteger('OBJECTID') != 6:
gdaltest.post_reason("gml_id = 'SGID024_MUNICIPALITIES2004_EDITED_5' filter failed")
feat.DumpReadable()
return 'fail'
lyr.SetAttributeFilter(None)
lyr.SetSpatialFilterRect(-1e8,-1e8,1e8,1e8)
spatialfiltercount = lyr.GetFeatureCount()
lyr.SetSpatialFilter(None)
allcount = lyr.GetFeatureCount()
if allcount != spatialfiltercount or allcount == 0:
gdaltest.post_reason('spatialfiltercount != allcount')
print(spatialfiltercount)
print(allcount)
return 'fail'
return 'success'
###############################################################################
# Test WFS SORTBY support
def ogr_wfs_deegree_sortby():
if gdaltest.wfs_drv is None:
return 'skip'
ds = ogr.Open('WFS:http://demo.deegree.org:80/utah-workspace/services?MAXFEATURES=10&VERSION=1.1.0')
if ds is None:
if gdaltest.gdalurlopen('http://demo.deegree.org:80/utah-workspace/services') is None:
print('cannot open URL')
return 'skip'
return 'fail'
lyr = ds.ExecuteSQL("SELECT * FROM \"app:SGID024_Municipalities2004_edited\" ORDER BY OBJECTID DESC")
feat = lyr.GetNextFeature()
if feat.GetFieldAsInteger('OBJECTID') != 240:
feat.DumpReadable()
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetFieldAsInteger('OBJECTID') != 239:
feat.DumpReadable()
return 'fail'
ds.ReleaseResultSet(lyr)
return 'success'
###############################################################################
def ogr_wfs_get_multiple_layer_defn(url):
if gdaltest.wfs_drv is None:
return 'skip'
if not gdaltest.run_slow_tests():
return 'skip'
ds = ogr.Open('WFS:' + url)
if ds is None:
if gdaltest.gdalurlopen(url) is None:
print('cannot open URL')
return 'skip'
return 'fail'
# This should be slow only for the first layer
for i in range(0, ds.GetLayerCount()):
lyr = ds.GetLayer(i)
print('Layer %s has %d fields' % (lyr.GetName(), lyr.GetLayerDefn().GetFieldCount()))
return 'success'
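# Note: the "slow only for the first layer" expectation relies on the
# driver's default behaviour (cf. OGR_WFS_LOAD_MULTIPLE_LAYER_DEFN, disabled
# explicitly in ogr_wfs_fake_wfs_server() above): the first GetLayerDefn()
# issues one DescribeFeatureType request covering all the layers, and the
# other layer definitions are then served from that cached response.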
###############################################################################
# Test an ESRI server
def ogr_wfs_esri():
return ogr_wfs_get_multiple_layer_defn('http://map.ngdc.noaa.gov/wfsconnector/com.esri.wfs.Esrimap/dart_atlantic_f')
###############################################################################
# Test an ESRI server
def ogr_wfs_esri_2():
return ogr_wfs_get_multiple_layer_defn('http://sentinel.ga.gov.au/wfsconnector/com.esri.wfs.Esrimap')
###############################################################################
# Test a CubeWerx server
def ogr_wfs_cubewerx():
return ogr_wfs_get_multiple_layer_defn('http://portal.cubewerx.com/cubewerx/cubeserv/cubeserv.cgi?CONFIG=haiti_vgi&DATASTORE=vgi')
###############################################################################
# Test a TinyOWS server
def ogr_wfs_tinyows():
return ogr_wfs_get_multiple_layer_defn('http://www.tinyows.org/cgi-bin/tinyows')
###############################################################################
# Test an ERDAS Apollo server
def ogr_wfs_erdas_apollo():
return ogr_wfs_get_multiple_layer_defn('http://apollo.erdas.com/erdas-apollo/vector/Cherokee')
###############################################################################
# Test an Intergraph server
def ogr_wfs_intergraph():
return ogr_wfs_get_multiple_layer_defn('http://ideg.xunta.es/WFS_POL/request.aspx')
###############################################################################
# Test a MapInfo server
def ogr_wfs_mapinfo():
return ogr_wfs_get_multiple_layer_defn('http://www.mapinfo.com/miwfs')
###############################################################################
# Test with OGR_WFS_USE_STREAMING=NO
def ogr_wfs_turn_streaming_off():
gdal.SetConfigOption('OGR_WFS_USE_STREAMING', 'NO')
return 'success'
def ogr_wfs_turn_streaming_on():
gdal.SetConfigOption('OGR_WFS_USE_STREAMING', None)
return 'success'
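# Usage sketch (hypothetical wiring, not verified against the test list at
# the bottom of this file): these two no-op-looking entries are meant to
# bracket a second run of the earlier tests, so that everything is exercised
# both with and without streamed GetFeature parsing, e.g.:
#
#   gdaltest_list = [...] + [ogr_wfs_turn_streaming_off] + [...] + [ogr_wfs_turn_streaming_on]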
###############################################################################
def ogr_wfs_vsimem_fail_because_not_enabled():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.PushErrorHandler()
ds = ogr.Open('WFS:/vsimem/wfs_endpoint')
gdal.PopErrorHandler()
if ds is not None:
return 'fail'
return 'success'
###############################################################################
def ogr_wfs_vsimem_fail_because_no_get_capabilities():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
gdal.PushErrorHandler()
ds = ogr.Open('WFS:/vsimem/wfs_endpoint')
gdal.PopErrorHandler()
if ds is not None:
return 'fail'
return 'success'
###############################################################################
def ogr_wfs_vsimem_fail_because_empty_response():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint?SERVICE=WFS&REQUEST=GetCapabilities',
'')
gdal.PushErrorHandler()
ds = ogr.Open('WFS:/vsimem/wfs_endpoint')
gdal.PopErrorHandler()
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
if gdal.GetLastErrorMsg().find('Empty content returned by server') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
return 'success'
###############################################################################
def ogr_wfs_vsimem_fail_because_no_WFS_Capabilities():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint?SERVICE=WFS&REQUEST=GetCapabilities',
'<foo/>')
gdal.PushErrorHandler()
ds = ogr.Open('WFS:/vsimem/wfs_endpoint')
gdal.PopErrorHandler()
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
if gdal.GetLastErrorMsg().find('Cannot find <WFS_Capabilities>') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
return 'success'
###############################################################################
def ogr_wfs_vsimem_fail_because_exception():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint?SERVICE=WFS&REQUEST=GetCapabilities',
'<ServiceExceptionReport/>')
gdal.PushErrorHandler()
ds = ogr.Open('WFS:/vsimem/wfs_endpoint')
gdal.PopErrorHandler()
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
if gdal.GetLastErrorMsg().find('Error returned by server : <ServiceExceptionReport/>') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
return 'success'
###############################################################################
def ogr_wfs_vsimem_fail_because_invalid_xml_capabilities():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint?SERVICE=WFS&REQUEST=GetCapabilities',
'<invalid_xml')
gdal.PushErrorHandler()
ds = ogr.Open('WFS:/vsimem/wfs_endpoint')
gdal.PopErrorHandler()
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
if gdal.GetLastErrorMsg().find('Invalid XML content : <invalid_xml') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
return 'success'
###############################################################################
def ogr_wfs_vsimem_fail_because_missing_featuretypelist():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint?SERVICE=WFS&REQUEST=GetCapabilities',
"""<WFS_Capabilities>
</WFS_Capabilities>
""")
gdal.PushErrorHandler()
ds = ogr.Open('WFS:/vsimem/wfs_endpoint')
gdal.PopErrorHandler()
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
if gdal.GetLastErrorMsg().find('Cannot find <FeatureTypeList>') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
return 'success'
###############################################################################
def ogr_wfs_vsimem_wfs110_open_getcapabilities_file():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.FileFromMemBuffer('/vsimem/caps.xml',
"""<WFS_Capabilities
""")
gdal.PushErrorHandler()
ds = ogr.Open('/vsimem/caps.xml')
gdal.PopErrorHandler()
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
if gdal.GetLastErrorMsg().find('Parse error at EOF, not all elements have been closed') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
gdal.FileFromMemBuffer('/vsimem/caps.xml',
"""<foo><WFS_Capabilities/></foo>
""")
gdal.PushErrorHandler()
ds = ogr.Open('/vsimem/caps.xml')
gdal.PopErrorHandler()
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
if gdal.GetLastErrorMsg().find('Cannot find <WFS_Capabilities>') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
gdal.FileFromMemBuffer('/vsimem/caps.xml',
"""<WFS_Capabilities version="1.1.0">
<FeatureTypeList>
<FeatureType/>
<FeatureType>
<Name>my_layer</Name>
</FeatureType>
</FeatureTypeList>
</WFS_Capabilities>
""")
gdal.PushErrorHandler()
ds = ogr.Open('/vsimem/caps.xml')
gdal.PopErrorHandler()
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
if gdal.GetLastErrorMsg().find('Cannot find base URL') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
gdal.FileFromMemBuffer('/vsimem/caps.xml',
"""<WFS_Capabilities version="1.1.0">
<ows:OperationsMetadata>
<ows:Operation name="GetCapabilities">
<ows:DCP><ows:HTTP>
<ows:Get xlink:href="/vsimem/foo"/>
<ows:Post xlink:href="/vsimem/foo"/>
</ows:HTTP></ows:DCP>
</ows:Operation>
</ows:OperationsMetadata>
<FeatureTypeList>
<FeatureType/>
<FeatureType>
<Name>my_layer</Name>
</FeatureType>
</FeatureTypeList>
</WFS_Capabilities>
""")
ds = ogr.Open('/vsimem/caps.xml')
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
if ds.GetLayerCount() != 1:
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
def ogr_wfs_vsimem_wfs110_minimal_instance():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
# Invalid response, but enough for our purposes
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint?SERVICE=WFS&REQUEST=GetCapabilities',
"""
<WFS_Capabilities version="1.1.0">
<ows:ServiceIdentification>
<ows:Title>LDS Testing</ows:Title>
</ows:ServiceIdentification>
<FeatureTypeList/>
</WFS_Capabilities>
""")
ds = ogr.Open('WFS:/vsimem/wfs_endpoint')
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
if ds.GetLayerCount() != 0:
gdaltest.post_reason('fail')
return 'fail'
if ds.GetMetadataDomainList() != ['', 'xml:capabilities']:
gdaltest.post_reason('fail')
return 'fail'
if ds.GetMetadata() != {'TITLE': 'LDS Testing'}:
gdaltest.post_reason('fail')
print(ds.GetMetadata())
return 'fail'
if len(ds.GetMetadata_List("xml:capabilities")) != 1:
gdaltest.post_reason('fail')
return 'fail'
gdal.PushErrorHandler()
ds = ogr.Open('WFS:/vsimem/wfs_endpoint', update = 1)
gdal.PopErrorHandler()
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
def ogr_wfs_vsimem_wfs110_one_layer_missing_describefeaturetype():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
# Invalid response, but enough for our purposes
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint?SERVICE=WFS&REQUEST=GetCapabilities',
"""<WFS_Capabilities version="1.1.0">
<FeatureTypeList>
<FeatureType/>
<FeatureType>
<Name>my_layer</Name>
</FeatureType>
</FeatureTypeList>
</WFS_Capabilities>
""")
ds = ogr.Open('WFS:/vsimem/wfs_endpoint')
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
if ds.GetLayerCount() != 1:
gdaltest.post_reason('fail')
return 'fail'
lyr = ds.GetLayer(0)
if lyr.GetName() != 'my_layer':
gdaltest.post_reason('fail')
return 'fail'
# Missing DescribeFeatureType
gdal.ErrorReset()
gdal.PushErrorHandler()
lyr_defn = lyr.GetLayerDefn()
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('fail')
return 'fail'
if lyr_defn.GetFieldCount() != 0:
gdaltest.post_reason('fail')
return 'fail'
lyr_defn = lyr.GetLayerDefn()
return 'success'
###############################################################################
def ogr_wfs_vsimem_wfs110_one_layer_invalid_describefeaturetype():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
ds = ogr.Open('WFS:/vsimem/wfs_endpoint')
lyr = ds.GetLayer(0)
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType&TYPENAME=my_layer',
"""<invalid_xml
""")
gdal.ErrorReset()
gdal.PushErrorHandler()
lyr_defn = lyr.GetLayerDefn()
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('fail')
return 'fail'
if lyr_defn.GetFieldCount() != 0:
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
def ogr_wfs_vsimem_wfs110_one_layer_describefeaturetype_missing_schema():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
ds = ogr.Open('WFS:/vsimem/wfs_endpoint')
lyr = ds.GetLayer(0)
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType&TYPENAME=my_layer',
"""<missing_schema/>
""")
gdal.ErrorReset()
gdal.PushErrorHandler()
lyr_defn = lyr.GetLayerDefn()
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('fail')
return 'fail'
if lyr_defn.GetFieldCount() != 0:
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
def ogr_wfs_vsimem_wfs110_one_layer_describefeaturetype():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
ds = ogr.Open('WFS:/vsimem/wfs_endpoint')
lyr = ds.GetLayer(0)
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType&TYPENAME=my_layer',
"""<xsd:schema xmlns:foo="http://foo" xmlns:gml="http://www.opengis.net/gml" xmlns:xsd="http://www.w3.org/2001/XMLSchema" elementFormDefault="qualified" targetNamespace="http://foo">
<xsd:import namespace="http://www.opengis.net/gml" schemaLocation="http://foo/schemas/gml/3.1.1/base/gml.xsd"/>
<xsd:complexType name="my_layerType">
<xsd:complexContent>
<xsd:extension base="gml:AbstractFeatureType">
<xsd:sequence>
<xsd:element maxOccurs="1" minOccurs="0" name="str" nillable="true" type="xsd:string"/>
<xsd:element maxOccurs="1" minOccurs="0" name="boolean" nillable="true" type="xsd:boolean"/>
<xsd:element maxOccurs="1" minOccurs="0" name="short" nillable="true" type="xsd:short"/>
<xsd:element maxOccurs="1" minOccurs="0" name="int" nillable="true" type="xsd:int"/>
<xsd:element maxOccurs="1" minOccurs="0" name="float" nillable="true" type="xsd:float"/>
<xsd:element maxOccurs="1" minOccurs="0" name="double" nillable="true" type="xsd:double"/>
<xsd:element maxOccurs="1" minOccurs="0" name="dt" nillable="true" type="xsd:dateTime"/>
<xsd:element maxOccurs="1" minOccurs="0" name="shape" nillable="true" type="gml:PointPropertyType"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:element name="my_layer" substitutionGroup="gml:_Feature" type="foo:my_layerType"/>
</xsd:schema>
""")
lyr_defn = lyr.GetLayerDefn()
if lyr_defn.GetFieldCount() != 8:
gdaltest.post_reason('fail')
return 'fail'
if lyr_defn.GetGeomFieldCount() != 1:
gdaltest.post_reason('fail')
return 'fail'
return 'success'
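# --- Illustrative sketch (not used by the tests): inspecting the OGR schema
# derived from the DescribeFeatureType response above. The eight xsd:element
# declarations become OGR fields, and gml:PointPropertyType becomes the
# geometry field.
def dump_layer_schema(lyr):
    defn = lyr.GetLayerDefn()
    for i in range(defn.GetFieldCount()):
        fld = defn.GetFieldDefn(i)
        print('%s: %s' % (fld.GetName(), ogr.GetFieldTypeName(fld.GetType())))
    for i in range(defn.GetGeomFieldCount()):
        print('geometry field: %s' % defn.GetGeomFieldDefn(i).GetName())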
###############################################################################
def ogr_wfs_vsimem_wfs110_one_layer_xmldescriptionfile_to_be_updated():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
gdal.FileFromMemBuffer('/vsimem/ogr_wfs_xmldescriptionfile_to_be_updated.xml',
"""<OGRWFSDataSource>
<URL>/vsimem/wfs_endpoint</URL>
</OGRWFSDataSource>""")
ds = ogr.Open('/vsimem/ogr_wfs_xmldescriptionfile_to_be_updated.xml')
lyr = ds.GetLayer(0)
if lyr.GetName() != 'my_layer':
gdaltest.post_reason('fail')
return 'fail'
ds = None
f = gdal.VSIFOpenL('/vsimem/ogr_wfs_xmldescriptionfile_to_be_updated.xml', 'rb')
data = gdal.VSIFReadL(1, 100000, f).decode('ascii')
gdal.VSIFCloseL(f)
if data != """<OGRWFSDataSource>
<URL>/vsimem/wfs_endpoint</URL>
<WFS_Capabilities version="1.1.0">
<FeatureTypeList>
<FeatureType />
<FeatureType>
<Name>my_layer</Name>
</FeatureType>
</FeatureTypeList>
</WFS_Capabilities>
</OGRWFSDataSource>
""":
gdaltest.post_reason('fail')
print(data)
return 'fail'
ds = ogr.Open('/vsimem/ogr_wfs_xmldescriptionfile_to_be_updated.xml')
lyr = ds.GetLayer(0)
lyr.GetLayerDefn()
ds = None
f = gdal.VSIFOpenL('/vsimem/ogr_wfs_xmldescriptionfile_to_be_updated.xml', 'rb')
data = gdal.VSIFReadL(1, 100000, f).decode('ascii')
gdal.VSIFCloseL(f)
if data != """<OGRWFSDataSource>
<URL>/vsimem/wfs_endpoint</URL>
<WFS_Capabilities version="1.1.0">
<FeatureTypeList>
<FeatureType />
<FeatureType>
<Name>my_layer</Name>
</FeatureType>
</FeatureTypeList>
</WFS_Capabilities>
<OGRWFSLayer name="my_layer">
<schema foo="http://foo" gml="http://www.opengis.net/gml" xsd="http://www.w3.org/2001/XMLSchema" elementFormDefault="qualified" targetNamespace="http://foo">
<import namespace="http://www.opengis.net/gml" schemaLocation="http://foo/schemas/gml/3.1.1/base/gml.xsd" />
<complexType name="my_layerType">
<complexContent>
<extension base="gml:AbstractFeatureType">
<sequence>
<element maxOccurs="1" minOccurs="0" name="str" nillable="true" type="xsd:string" />
<element maxOccurs="1" minOccurs="0" name="boolean" nillable="true" type="xsd:boolean" />
<element maxOccurs="1" minOccurs="0" name="short" nillable="true" type="xsd:short" />
<element maxOccurs="1" minOccurs="0" name="int" nillable="true" type="xsd:int" />
<element maxOccurs="1" minOccurs="0" name="float" nillable="true" type="xsd:float" />
<element maxOccurs="1" minOccurs="0" name="double" nillable="true" type="xsd:double" />
<element maxOccurs="1" minOccurs="0" name="dt" nillable="true" type="xsd:dateTime" />
<element maxOccurs="1" minOccurs="0" name="shape" nillable="true" type="gml:PointPropertyType" />
</sequence>
</extension>
</complexContent>
</complexType>
<element name="my_layer" substitutionGroup="gml:_Feature" type="foo:my_layerType" />
</schema>
</OGRWFSLayer>
</OGRWFSDataSource>
""":
gdaltest.post_reason('fail')
print(data)
return 'fail'
gdal.FileFromMemBuffer('/vsimem/ogr_wfs_xmldescriptionfile_to_be_updated.xml',
"""<OGRWFSDataSource>
<URL>/vsimem/wfs_endpoint</URL>
<WFS_Capabilities version="1.1.0">
<FeatureTypeList>
<FeatureType />
<FeatureType>
<Name>my_layer</Name>
</FeatureType>
</FeatureTypeList>
</WFS_Capabilities>
<OGRWFSLayer name="my_layer">
<schema foo="http://foo" gml="http://www.opengis.net/gml" xsd="http://www.w3.org/2001/XMLSchema" elementFormDefault="qualified" targetNamespace="http://foo">
<import namespace="http://www.opengis.net/gml" schemaLocation="http://foo/schemas/gml/3.1.1/base/gml.xsd" />
<complexType name="my_layerType">
<complexContent>
<extension base="gml:AbstractFeatureType">
<sequence>
<element maxOccurs="1" minOccurs="0" name="str" nillable="true" type="xsd:string" />
</sequence>
</extension>
</complexContent>
</complexType>
<element name="my_layer" substitutionGroup="gml:_Feature" type="foo:my_layerType" />
</schema>
</OGRWFSLayer>
</OGRWFSDataSource>""")
ds = ogr.Open('/vsimem/ogr_wfs_xmldescriptionfile_to_be_updated.xml')
lyr = ds.GetLayer(0)
if lyr.GetLayerDefn().GetFieldCount() != 2:
gdaltest.post_reason('fail')
return 'fail'
ds = None
return 'success'
###############################################################################
def ogr_wfs_vsimem_wfs110_one_layer_missing_getfeaturecount_no_hits():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
ds = ogr.Open('WFS:/vsimem/wfs_endpoint')
lyr = ds.GetLayer(0)
gdal.ErrorReset()
gdal.PushErrorHandler()
count = lyr.GetFeatureCount()
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('fail')
return 'fail'
if count != 0:
gdaltest.post_reason('fail')
print(count)
return 'fail'
return 'success'
###############################################################################
def ogr_wfs_vsimem_wfs110_one_layer_missing_getfeaturecount_with_hits():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint?SERVICE=WFS&REQUEST=GetCapabilities',
"""<WFS_Capabilities version="1.1.0">
<OperationsMetadata>
<ows:Operation name="GetFeature">
<ows:Parameter name="resultType">
<ows:Value>results</ows:Value>
<ows:Value>hits</ows:Value>
</ows:Parameter>
</ows:Operation>
</OperationsMetadata>
<FeatureTypeList>
<FeatureType>
<Name>my_layer</Name>
<DefaultSRS>urn:ogc:def:crs:EPSG::4326</DefaultSRS>
<ows:WGS84BoundingBox>
<ows:LowerCorner>-180.0 -90.0</ows:LowerCorner>
<ows:UpperCorner>180.0 90.0</ows:UpperCorner>
</ows:WGS84BoundingBox>
</FeatureType>
</FeatureTypeList>
</WFS_Capabilities>
""")
ds = ogr.Open('WFS:/vsimem/wfs_endpoint')
lyr = ds.GetLayer(0)
gdal.ErrorReset()
gdal.PushErrorHandler()
count = lyr.GetFeatureCount()
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('fail')
return 'fail'
if count != 0:
gdaltest.post_reason('fail')
print(count)
return 'fail'
return 'success'
###############################################################################
def ogr_wfs_vsimem_wfs110_one_layer_invalid_getfeaturecount_with_hits():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
ds = ogr.Open('WFS:/vsimem/wfs_endpoint')
lyr = ds.GetLayer(0)
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=GetFeature&TYPENAME=my_layer&RESULTTYPE=hits',
"""<invalid_xml""")
gdal.ErrorReset()
gdal.PushErrorHandler()
count = lyr.GetFeatureCount()
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('fail')
return 'fail'
if count != 0:
gdaltest.post_reason('fail')
print(count)
return 'fail'
return 'success'
###############################################################################
def ogr_wfs_vsimem_wfs110_one_layer_getfeaturecount_with_hits_missing_FeatureCollection():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
ds = ogr.Open('WFS:/vsimem/wfs_endpoint')
lyr = ds.GetLayer(0)
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=GetFeature&TYPENAME=my_layer&RESULTTYPE=hits',
"""<dummy_xml/>""")
gdal.ErrorReset()
gdal.PushErrorHandler()
count = lyr.GetFeatureCount()
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('fail')
return 'fail'
if count != 0:
gdaltest.post_reason('fail')
print(count)
return 'fail'
return 'success'
###############################################################################
def ogr_wfs_vsimem_wfs110_one_layer_getfeaturecount_with_hits_invalid_xml():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
ds = ogr.Open('WFS:/vsimem/wfs_endpoint')
lyr = ds.GetLayer(0)
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=GetFeature&TYPENAME=my_layer&RESULTTYPE=hits',
"""<invalid_xml""")
gdal.ErrorReset()
gdal.PushErrorHandler()
count = lyr.GetFeatureCount()
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('fail')
return 'fail'
if count != 0:
gdaltest.post_reason('fail')
print(count)
return 'fail'
return 'success'
###############################################################################
def ogr_wfs_vsimem_wfs110_one_layer_getfeaturecount_with_hits_ServiceExceptionReport():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
ds = ogr.Open('WFS:/vsimem/wfs_endpoint')
lyr = ds.GetLayer(0)
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=GetFeature&TYPENAME=my_layer&RESULTTYPE=hits',
"""<ServiceExceptionReport/>""")
gdal.ErrorReset()
gdal.PushErrorHandler()
count = lyr.GetFeatureCount()
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('fail')
return 'fail'
if count != 0:
gdaltest.post_reason('fail')
print(count)
return 'fail'
return 'success'
###############################################################################
def ogr_wfs_vsimem_wfs110_one_layer_getfeaturecount_with_hits_missing_numberOfFeatures():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
ds = ogr.Open('WFS:/vsimem/wfs_endpoint')
lyr = ds.GetLayer(0)
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=GetFeature&TYPENAME=my_layer&RESULTTYPE=hits',
"""<FeatureCollection/>""")
gdal.ErrorReset()
gdal.PushErrorHandler()
count = lyr.GetFeatureCount()
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('fail')
return 'fail'
if count != 0:
gdaltest.post_reason('fail')
print(count)
return 'fail'
return 'success'
###############################################################################
def ogr_wfs_vsimem_wfs110_one_layer_getfeaturecount_with_hits():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
ds = ogr.Open('WFS:/vsimem/wfs_endpoint')
lyr = ds.GetLayer(0)
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=GetFeature&TYPENAME=my_layer&RESULTTYPE=hits',
"""<wfs:FeatureCollection xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:ogc="http://www.opengis.net/ogc"
xmlns:foo="http://foo"
xmlns:wfs="http://www.opengis.net/wfs"
xmlns:ows="http://www.opengis.net/ows"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:gml="http://www.opengis.net/gml"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
numberOfFeatures="1"
timeStamp="2015-04-17T14:14:24.859Z"
xsi:schemaLocation="http://foo /vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType&TYPENAME=my_layer
http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.1.0/wfs.xsd">
</wfs:FeatureCollection>""")
count = lyr.GetFeatureCount()
if count != 1:
gdaltest.post_reason('fail')
print(count)
return 'fail'
return 'success'
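# Note: with RESULTTYPE=hits the server answers with an empty
# FeatureCollection whose numberOfFeatures attribute carries the count, so
# GetFeatureCount() can be answered without transferring any feature. The
# failing variants above exercise the error paths of that same parse.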
###############################################################################
def ogr_wfs_vsimem_wfs110_one_layer_missing_getfeature():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
ds = ogr.Open('WFS:/vsimem/wfs_endpoint')
lyr = ds.GetLayer(0)
gdal.ErrorReset()
gdal.PushErrorHandler()
f = lyr.GetNextFeature()
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('fail')
return 'fail'
if f is not None:
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
def ogr_wfs_vsimem_wfs110_one_layer_invalid_getfeature():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
ds = ogr.Open('WFS:/vsimem/wfs_endpoint')
lyr = ds.GetLayer(0)
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=GetFeature&TYPENAME=my_layer',
"""<invalid_xml
""")
gdal.ErrorReset()
gdal.PushErrorHandler()
f = lyr.GetNextFeature()
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('fail')
return 'fail'
if f is not None:
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
def ogr_wfs_vsimem_wfs110_one_layer_exception_getfeature():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
ds = ogr.Open('WFS:/vsimem/wfs_endpoint')
lyr = ds.GetLayer(0)
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=GetFeature&TYPENAME=my_layer',
"""<ServiceExceptionReport/>
""")
gdal.ErrorReset()
gdal.PushErrorHandler()
f = lyr.GetNextFeature()
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg().find('Error returned by server') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
if f is not None:
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
def ogr_wfs_vsimem_wfs110_one_layer_getfeature():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint?SERVICE=WFS&REQUEST=GetCapabilities',
"""<WFS_Capabilities version="1.1.0">
<FeatureTypeList>
<FeatureType>
<Name>my_layer</Name>
<DefaultSRS>urn:ogc:def:crs:EPSG::4326</DefaultSRS>
<ows:WGS84BoundingBox>
<ows:LowerCorner>-170.0 -80.0</ows:LowerCorner>
<ows:UpperCorner>170.0 80.0</ows:UpperCorner>
</ows:WGS84BoundingBox>
</FeatureType>
</FeatureTypeList>
</WFS_Capabilities>
""")
ds = ogr.Open('WFS:/vsimem/wfs_endpoint')
lyr = ds.GetLayer(0)
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=GetFeature&TYPENAME=my_layer',
"""<wfs:FeatureCollection xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:ogc="http://www.opengis.net/ogc"
xmlns:foo="http://foo"
xmlns:wfs="http://www.opengis.net/wfs"
xmlns:ows="http://www.opengis.net/ows"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:gml="http://www.opengis.net/gml"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
numberOfFeatures="1"
timeStamp="2015-04-17T14:14:24.859Z"
xsi:schemaLocation="http://foo /vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType&TYPENAME=my_layer
http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.1.0/wfs.xsd">
<gml:featureMembers>
<foo:my_layer gml:id="my_layer.1">
<foo:str>str</foo:str>
<foo:boolean>true</foo:boolean>
<foo:short>1</foo:short>
<foo:int>123456789</foo:int>
<foo:float>1.2</foo:float>
<foo:double>1.23</foo:double>
<foo:dt>2015-04-17T12:34:56Z</foo:dt>
<foo:shape>
<gml:Point srsDimension="2" srsName="urn:ogc:def:crs:EPSG::4326">
<gml:pos>49 2</gml:pos>
</gml:Point>
</foo:shape>
</foo:my_layer>
</gml:featureMembers>
</wfs:FeatureCollection>
""")
f = lyr.GetNextFeature()
if f.gml_id != 'my_layer.1' or f.boolean != 1 or f.str != 'str' or f.short != 1 or \
f.int != 123456789 or f.float != 1.2 or f.double != 1.23 or f.dt != '2015-04-17T12:34:56Z' or \
f.GetGeometryRef().ExportToWkt() != 'POINT (2 49)':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
sql_lyr = ds.ExecuteSQL('SELECT * FROM my_layer')
f = sql_lyr.GetNextFeature()
if f.gml_id != 'my_layer.1':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
ds.ReleaseResultSet(sql_lyr)
return 'success'
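# --- Illustrative sketch (not used by the tests): the attribute-style access
# used above (f.gml_id, f.int, ...) is SWIG convenience syntax; the explicit
# equivalent goes through GetLayerDefn()/GetField():
def first_feature_as_dict(lyr):
    lyr.ResetReading()
    f = lyr.GetNextFeature()
    if f is None:
        return None
    defn = lyr.GetLayerDefn()
    return dict((defn.GetFieldDefn(i).GetName(), f.GetField(i))
                for i in range(defn.GetFieldCount()))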
###############################################################################
def ogr_wfs_vsimem_wfs110_one_layer_getextent():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
ds = ogr.Open('WFS:/vsimem/wfs_endpoint')
lyr = ds.GetLayer(0)
if lyr.GetExtent() != (2,2,49,49):
gdaltest.post_reason('fail')
print(lyr.GetExtent())
return 'fail'
return 'success'
###############################################################################
def ogr_wfs_vsimem_wfs110_one_layer_getextent_without_getfeature():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
ds = ogr.Open('WFS:/vsimem/wfs_endpoint')
lyr = ds.GetLayer(0)
gdal.Unlink('/vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=GetFeature&TYPENAME=my_layer')
ds = ogr.Open('WFS:/vsimem/wfs_endpoint')
lyr = ds.GetLayer(0)
gdal.PushErrorHandler()
extent = lyr.GetExtent()
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('fail')
return 'fail'
if extent != (0,0,0,0):
gdaltest.post_reason('fail')
print(extent)
return 'fail'
return 'success'
###############################################################################
def ogr_wfs_vsimem_wfs110_one_layer_getextent_optimized():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint?SERVICE=WFS&REQUEST=GetCapabilities',
"""<WFS_Capabilities version="1.1.0">
<FeatureTypeList>
<FeatureType>
<Name>my_layer</Name>
<DefaultSRS>urn:ogc:def:crs:EPSG::4326</DefaultSRS>
<ows:WGS84BoundingBox>
<ows:LowerCorner>-180.0 -90.0</ows:LowerCorner>
<ows:UpperCorner>180.0 90.0</ows:UpperCorner>
</ows:WGS84BoundingBox>
</FeatureType>
<FeatureType>
<Name>my_layer2</Name>
<DefaultSRS>urn:ogc:def:crs:EPSG::4326</DefaultSRS>
<ows:WGS84BoundingBox>
<ows:LowerCorner>-170.0 -80.0</ows:LowerCorner>
<ows:UpperCorner>170.0 80.0</ows:UpperCorner>
</ows:WGS84BoundingBox>
</FeatureType>
<FeatureType>
<Name>my_layer3</Name>
<DefaultSRS>urn:ogc:def:crs:EPSG::3857</DefaultSRS>
<ows:WGS84BoundingBox>
<ows:LowerCorner>-180.0 -85.0511287798065</ows:LowerCorner>
<ows:UpperCorner>180.0 85.0511287798065</ows:UpperCorner>
</ows:WGS84BoundingBox>
</FeatureType>
<FeatureType>
<Name>my_layer4</Name>
<DefaultSRS>urn:ogc:def:crs:EPSG::3857</DefaultSRS>
<ows:WGS84BoundingBox>
<ows:LowerCorner>-180.0 -90</ows:LowerCorner>
<ows:UpperCorner>180.0 90</ows:UpperCorner>
</ows:WGS84BoundingBox>
</FeatureType>
</FeatureTypeList>
<ogc:Filter_Capabilities>
<ogc:Scalar_Capabilities>
<ogc:ArithmeticOperators>
<ogc:SimpleArithmetic/>
<ogc:Functions>
<ogc:FunctionNames>
<ogc:FunctionName nArgs="1">abs_4</ogc:FunctionName> <!-- geoserver "signature" -->
</ogc:FunctionNames>
</ogc:Functions>
</ogc:ArithmeticOperators>
</ogc:Scalar_Capabilities>
</ogc:Filter_Capabilities>
</WFS_Capabilities>
""")
ds = ogr.Open('WFS:/vsimem/wfs_endpoint')
lyr = ds.GetLayer(0)
if lyr.GetExtent() != (-180.0, 180.0, -90.0, 90.0):
gdaltest.post_reason('fail')
print(lyr.GetExtent())
return 'fail'
lyr = ds.GetLayer(1)
gdal.PushErrorHandler()
got_extent = lyr.GetExtent()
gdal.PopErrorHandler()
if got_extent != (0.0, 0.0, 0.0, 0.0):
gdaltest.post_reason('fail')
print(got_extent)
return 'fail'
ds = gdal.OpenEx('WFS:/vsimem/wfs_endpoint', open_options = ['TRUST_CAPABILITIES_BOUNDS=YES'])
lyr = ds.GetLayer(1)
if lyr.GetExtent() != (-170.0, 170.0, -80.0, 80.0):
gdaltest.post_reason('fail')
print(lyr.GetExtent())
return 'fail'
sys.path.append('../osr')
import osr_ct
osr_ct.osr_ct_1()
if gdaltest.have_proj4 == 1:
gdal.SetConfigOption('OGR_WFS_TRUST_CAPABILITIES_BOUNDS', 'YES')
ds = ogr.Open('WFS:/vsimem/wfs_endpoint')
gdal.SetConfigOption('OGR_WFS_TRUST_CAPABILITIES_BOUNDS', None)
lyr = ds.GetLayer(2)
expected_extent = (-20037508.342789248, 20037508.342789248, -20037508.342789154, 20037508.342789147)
got_extent = lyr.GetExtent()
for i in range(4):
if abs(expected_extent[i]-got_extent[i]) > 1e-5:
gdaltest.post_reason('fail')
print(got_extent)
return 'fail'
return 'success'
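# Summary of the behaviour exercised above (as observed by this test): by
# default the driver only falls back to the advertised WGS84BoundingBox for
# GetExtent() in clear-cut cases; TRUST_CAPABILITIES_BOUNDS=YES (open option)
# or OGR_WFS_TRUST_CAPABILITIES_BOUNDS=YES (config option) makes it trust the
# capabilities bounds unconditionally, reprojecting them to the layer SRS
# (hence the EPSG:3857 extents checked with a tolerance).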
###############################################################################
def ogr_wfs_vsimem_wfs110_one_layer_getfeature_ogr_getfeature():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint?SERVICE=WFS&REQUEST=GetCapabilities',
"""<WFS_Capabilities version="1.1.0">
<FeatureTypeList>
<FeatureType>
<Name>my_layer</Name>
<DefaultSRS>urn:ogc:def:crs:EPSG::4326</DefaultSRS>
<ows:WGS84BoundingBox>
<ows:LowerCorner>-180.0 -90.0</ows:LowerCorner>
<ows:UpperCorner>180.0 90.0</ows:UpperCorner>
</ows:WGS84BoundingBox>
</FeatureType>
</FeatureTypeList>
<ogc:Filter_Capabilities>
<ogc:Spatial_Capabilities>
<ogc:GeometryOperands>
<ogc:GeometryOperand>gml:Envelope</ogc:GeometryOperand>
<ogc:GeometryOperand>gml:Point</ogc:GeometryOperand>
<ogc:GeometryOperand>gml:LineString</ogc:GeometryOperand>
<ogc:GeometryOperand>gml:Polygon</ogc:GeometryOperand>
</ogc:GeometryOperands>
<ogc:SpatialOperators>
<ogc:SpatialOperator name="Disjoint"/>
<ogc:SpatialOperator name="Equals"/>
<ogc:SpatialOperator name="DWithin"/>
<ogc:SpatialOperator name="Beyond"/>
<ogc:SpatialOperator name="Intersects"/>
<ogc:SpatialOperator name="Touches"/>
<ogc:SpatialOperator name="Crosses"/>
<ogc:SpatialOperator name="Within"/>
<ogc:SpatialOperator name="Contains"/>
<ogc:SpatialOperator name="Overlaps"/>
<ogc:SpatialOperator name="BBOX"/>
</ogc:SpatialOperators>
</ogc:Spatial_Capabilities>
<ogc:Scalar_Capabilities>
<ogc:LogicalOperators/>
<ogc:ComparisonOperators>
<ogc:ComparisonOperator>LessThan</ogc:ComparisonOperator>
<ogc:ComparisonOperator>GreaterThan</ogc:ComparisonOperator>
<ogc:ComparisonOperator>LessThanEqualTo</ogc:ComparisonOperator>
<ogc:ComparisonOperator>GreaterThanEqualTo</ogc:ComparisonOperator>
<ogc:ComparisonOperator>EqualTo</ogc:ComparisonOperator>
<ogc:ComparisonOperator>NotEqualTo</ogc:ComparisonOperator>
<ogc:ComparisonOperator>Like</ogc:ComparisonOperator>
<ogc:ComparisonOperator>Between</ogc:ComparisonOperator>
<ogc:ComparisonOperator>NullCheck</ogc:ComparisonOperator>
</ogc:ComparisonOperators>
<ogc:ArithmeticOperators>
<ogc:SimpleArithmetic/>
<ogc:Functions/>
</ogc:ArithmeticOperators>
</ogc:Scalar_Capabilities>
<ogc:Id_Capabilities>
<ogc:FID/>
<ogc:EID/>
</ogc:Id_Capabilities>
</ogc:Filter_Capabilities>
</WFS_Capabilities>
""")
ds = ogr.Open('WFS:/vsimem/wfs_endpoint')
lyr = ds.GetLayer(0)
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=GetFeature&TYPENAME=my_layer&FILTER=%3CFilter%20xmlns%3D%22http:%2F%2Fwww.opengis.net%2Fogc%22%20xmlns:gml%3D%22http:%2F%2Fwww.opengis.net%2Fgml%22%3E%3CGmlObjectId%20id%3D%22my_layer.100%22%2F%3E%3C%2FFilter%3E',
"""<wfs:FeatureCollection xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:ogc="http://www.opengis.net/ogc"
xmlns:foo="http://foo"
xmlns:wfs="http://www.opengis.net/wfs"
xmlns:ows="http://www.opengis.net/ows"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:gml="http://www.opengis.net/gml"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
numberOfFeatures="1"
timeStamp="2015-04-17T14:14:24.859Z"
xsi:schemaLocation="http://foo /vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType&TYPENAME=my_layer
http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.1.0/wfs.xsd">
<gml:featureMembers>
<foo:my_layer gml:id="my_layer.100">
</foo:my_layer>
</gml:featureMembers>
</wfs:FeatureCollection>
""")
f = lyr.GetFeature(100)
if f.gml_id != 'my_layer.100':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
return 'success'
###############################################################################
def ogr_wfs_vsimem_wfs110_one_layer_filter_gml_id_failed():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
ds = ogr.Open('WFS:/vsimem/wfs_endpoint')
lyr = ds.GetLayer(0)
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=GetFeature&TYPENAME=my_layer',
"""<wfs:FeatureCollection xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:ogc="http://www.opengis.net/ogc"
xmlns:foo="http://foo"
xmlns:wfs="http://www.opengis.net/wfs"
xmlns:ows="http://www.opengis.net/ows"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:gml="http://www.opengis.net/gml"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
numberOfFeatures="0"
timeStamp="2015-04-17T14:14:24.859Z"
xsi:schemaLocation="http://foo /vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType&TYPENAME=my_layer
http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.1.0/wfs.xsd">
</wfs:FeatureCollection>
""")
lyr.SetAttributeFilter("gml_id = 'my_layer.1'")
gdal.ErrorReset()
gdal.PushErrorHandler()
f = lyr.GetNextFeature()
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('fail')
return 'fail'
if f is not None:
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
def ogr_wfs_vsimem_wfs110_one_layer_filter_gml_id_success():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
ds = ogr.Open('WFS:/vsimem/wfs_endpoint')
lyr = ds.GetLayer(0)
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=GetFeature&TYPENAME=my_layer&FILTER=%3CFilter%20xmlns%3D%22http:%2F%2Fwww.opengis.net%2Fogc%22%20xmlns:gml%3D%22http:%2F%2Fwww.opengis.net%2Fgml%22%3E%3CGmlObjectId%20id%3D%22my_layer.1%22%2F%3E%3CGmlObjectId%20id%3D%22my_layer.1%22%2F%3E%3C%2FFilter%3E',
"""<wfs:FeatureCollection xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:ogc="http://www.opengis.net/ogc"
xmlns:foo="http://foo"
xmlns:wfs="http://www.opengis.net/wfs"
xmlns:ows="http://www.opengis.net/ows"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:gml="http://www.opengis.net/gml"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
numberOfFeatures="1"
timeStamp="2015-04-17T14:14:24.859Z"
xsi:schemaLocation="http://foo /vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType&TYPENAME=my_layer
http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.1.0/wfs.xsd">
<gml:featureMembers>
<foo:my_layer gml:id="my_layer.1">
<foo:str>str</foo:str>
<foo:boolean>true</foo:boolean>
<foo:short>1</foo:short>
<foo:int>123456789</foo:int>
<foo:float>1.2</foo:float>
<foo:double>1.23</foo:double>
<foo:dt>2015-04-17T12:34:56Z</foo:dt>
<foo:shape>
<gml:Point srsDimension="2" srsName="urn:ogc:def:crs:EPSG::4326">
<gml:pos>49 2</gml:pos>
</gml:Point>
</foo:shape>
</foo:my_layer>
</gml:featureMembers>
</wfs:FeatureCollection>
""")
lyr.SetAttributeFilter("gml_id = 'my_layer.1' OR gml_id = 'my_layer.1'")
f = lyr.GetNextFeature()
if f is None:
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
def ogr_wfs_vsimem_wfs110_one_layer_filter():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
ds = ogr.Open('WFS:/vsimem/wfs_endpoint')
lyr = ds.GetLayer(0)
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=GetFeature&TYPENAME=my_layer&FILTER=%3CFilter%20xmlns%3D%22http:%2F%2Fwww.opengis.net%2Fogc%22%20xmlns:gml%3D%22http:%2F%2Fwww.opengis.net%2Fgml%22%3E%3COr%3E%3COr%3E%3COr%3E%3COr%3E%3COr%3E%3COr%3E%3COr%3E%3CAnd%3E%3CAnd%3E%3CPropertyIsEqualTo%3E%3CPropertyName%3Estr%3C%2FPropertyName%3E%3CLiteral%3Estr%3C%2FLiteral%3E%3C%2FPropertyIsEqualTo%3E%3CPropertyIsEqualTo%3E%3CPropertyName%3Eshort%3C%2FPropertyName%3E%3CLiteral%3E1%3C%2FLiteral%3E%3C%2FPropertyIsEqualTo%3E%3C%2FAnd%3E%3CPropertyIsEqualTo%3E%3CPropertyName%3Efloat%3C%2FPropertyName%3E%3CLiteral%3E1.2%3C%2FLiteral%3E%3C%2FPropertyIsEqualTo%3E%3C%2FAnd%3E%3CPropertyIsLike%20wildCard%3D%27%2A%27%20singleChar%3D%27_%27%20escapeChar%3D%27%21%27%3E%3CPropertyName%3Estr%3C%2FPropertyName%3E%3CLiteral%3Est%2A%3C%2FLiteral%3E%3C%2FPropertyIsLike%3E%3C%2FOr%3E%3CNot%3E%3CPropertyIsNull%3E%3CPropertyName%3Eboolean%3C%2FPropertyName%3E%3C%2FPropertyIsNull%3E%3C%2FNot%3E%3C%2FOr%3E%3CPropertyIsGreaterThan%3E%3CPropertyName%3Eint%3C%2FPropertyName%3E%3CLiteral%3E1%3C%2FLiteral%3E%3C%2FPropertyIsGreaterThan%3E%3C%2FOr%3E%3CPropertyIsGreaterThanOrEqualTo%3E%3CPropertyName%3Eint%3C%2FPropertyName%3E%3CLiteral%3E1%3C%2FLiteral%3E%3C%2FPropertyIsGreaterThanOrEqualTo%3E%3C%2FOr%3E%3CPropertyIsNotEqualTo%3E%3CPropertyName%3Eint%3C%2FPropertyName%3E%3CLiteral%3E2%3C%2FLiteral%3E%3C%2FPropertyIsNotEqualTo%3E%3C%2FOr%3E%3CPropertyIsLessThan%3E%3CPropertyName%3Eint%3C%2FPropertyName%3E%3CLiteral%3E2000000000%3C%2FLiteral%3E%3C%2FPropertyIsLessThan%3E%3C%2FOr%3E%3CPropertyIsLessThanOrEqualTo%3E%3CPropertyName%3Eint%3C%2FPropertyName%3E%3CLiteral%3E2000000000%3C%2FLiteral%3E%3C%2FPropertyIsLessThanOrEqualTo%3E%3C%2FOr%3E%3C%2FFilter%3E',
"""<wfs:FeatureCollection xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:ogc="http://www.opengis.net/ogc"
xmlns:foo="http://foo"
xmlns:wfs="http://www.opengis.net/wfs"
xmlns:ows="http://www.opengis.net/ows"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:gml="http://www.opengis.net/gml"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
numberOfFeatures="1"
timeStamp="2015-04-17T14:14:24.859Z"
xsi:schemaLocation="http://foo /vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType&TYPENAME=my_layer
http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.1.0/wfs.xsd">
<gml:featureMembers>
<foo:my_layer gml:id="my_layer.1">
<foo:str>str</foo:str>
<foo:boolean>true</foo:boolean>
<foo:short>1</foo:short>
<foo:int>123456789</foo:int>
<foo:float>1.2</foo:float>
<foo:double>1.23</foo:double>
<foo:dt>2015-04-17T12:34:56Z</foo:dt>
<foo:shape>
<gml:Point srsDimension="2" srsName="urn:ogc:def:crs:EPSG::4326">
<gml:pos>49 2</gml:pos>
</gml:Point>
</foo:shape>
</foo:my_layer>
</gml:featureMembers>
</wfs:FeatureCollection>
""")
lyr.SetAttributeFilter("(str = 'str' AND short = 1 AND float = 1.2) OR str LIKE 'st%' OR boolean IS NOT NULL OR int > 1 OR int >= 1 or int != 2 or int < 2000000000 or int <= 2000000000")
f = lyr.GetNextFeature()
if f is None:
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
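# Test attribute filters using ST_* spatial functions (error cases first, then valid requests)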
def ogr_wfs_vsimem_wfs110_one_layer_filter_spatial_ops():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
ds = ogr.Open('WFS:/vsimem/wfs_endpoint')
lyr = ds.GetLayer(0)
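# Canned GetFeature response reused by the valid spatial requests below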
content = """<wfs:FeatureCollection xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:ogc="http://www.opengis.net/ogc"
xmlns:foo="http://foo"
xmlns:wfs="http://www.opengis.net/wfs"
xmlns:ows="http://www.opengis.net/ows"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:gml="http://www.opengis.net/gml"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
numberOfFeatures="1"
timeStamp="2015-04-17T14:14:24.859Z"
xsi:schemaLocation="http://foo /vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType&TYPENAME=my_layer
http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.1.0/wfs.xsd">
<gml:featureMembers>
<foo:my_layer gml:id="my_layer.1">
<foo:str>str</foo:str>
<foo:boolean>true</foo:boolean>
<foo:short>1</foo:short>
<foo:int>123456789</foo:int>
<foo:float>1.2</foo:float>
<foo:double>1.23</foo:double>
<foo:dt>2015-04-17T12:34:56Z</foo:dt>
<foo:shape>
<gml:Point srsDimension="2" srsName="urn:ogc:def:crs:EPSG::4326">
<gml:pos>49 2</gml:pos>
</gml:Point>
</foo:shape>
</foo:my_layer>
</gml:featureMembers>
</wfs:FeatureCollection>
"""
# Invalid syntax
gdal.PushErrorHandler()
ret = lyr.SetAttributeFilter("ST_Intersects(shape)")
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg().find('Wrong number of arguments for ST_Intersects') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
gdal.PushErrorHandler()
ret = lyr.SetAttributeFilter("ST_Intersects(shape, 5)")
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg().find('Wrong field type for argument 2 of ST_Intersects') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
gdal.PushErrorHandler()
ret = lyr.SetAttributeFilter("ST_Intersects(shape, ST_MakeEnvelope(1))")
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg().find('Wrong number of arguments for ST_MakeEnvelope') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
gdal.PushErrorHandler()
ret = lyr.SetAttributeFilter("ST_Intersects(shape, ST_MakeEnvelope(1,1,1,'a'))")
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg().find('Wrong field type for argument 4 of ST_MakeEnvelope') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
gdal.PushErrorHandler()
ret = lyr.SetAttributeFilter("ST_Intersects(shape, ST_MakeEnvelope(1,1,1,1,3.5))")
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg().find('Wrong field type for argument 5 of ST_MakeEnvelope') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
gdal.PushErrorHandler()
ret = lyr.SetAttributeFilter("ST_Intersects(shape, ST_MakeEnvelope(1,1,1,1,'not_a_srs'))")
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg().find('Wrong value for argument 5 of ST_MakeEnvelope') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
gdal.PushErrorHandler()
ret = lyr.SetAttributeFilter("ST_Intersects(shape, ST_MakeEnvelope(1,1,1,1,-5))")
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg().find('Wrong value for argument 5 of ST_MakeEnvelope') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
gdal.PushErrorHandler()
ret = lyr.SetAttributeFilter("ST_Intersects(shape, ST_GeomFromText(1,2,3))")
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg().find('Wrong number of arguments for ST_GeomFromText') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
gdal.PushErrorHandler()
ret = lyr.SetAttributeFilter("ST_Intersects(shape, ST_GeomFromText(1))")
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg().find('Wrong field type for argument 1 of ST_GeomFromText') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
gdal.PushErrorHandler()
ret = lyr.SetAttributeFilter("ST_Intersects(shape, ST_GeomFromText('INVALID_GEOM'))")
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg().find('Wrong value for argument 1 of ST_GeomFromText') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
gdal.PushErrorHandler()
ret = lyr.SetAttributeFilter("ST_Intersects(shape, ST_GeomFromText('POINT(0 0)', 'invalid_srs'))")
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg().find('Wrong value for argument 2 of ST_GeomFromText') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
gdal.PushErrorHandler()
ret = lyr.SetAttributeFilter("ST_DWithin(shape)")
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg().find('Wrong number of arguments for ST_DWithin') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
gdal.PushErrorHandler()
ret = lyr.SetAttributeFilter("ST_DWithin(shape,'a',5)")
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg().find('Wrong field type for argument 2 of ST_DWithin') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
gdal.PushErrorHandler()
ret = lyr.SetAttributeFilter("ST_DWithin(shape,shape,'a')")
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg().find('Wrong field type for argument 3 of ST_DWithin') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
# Now valid requests
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=GetFeature&TYPENAME=my_layer&FILTER=%3CFilter%20xmlns%3D%22http:%2F%2Fwww.opengis.net%2Fogc%22%20xmlns:gml%3D%22http:%2F%2Fwww.opengis.net%2Fgml%22%3E%3COr%3E%3COr%3E%3COr%3E%3CIntersects%3E%3CPropertyName%3Eshape%3C%2FPropertyName%3E%3Cgml:Envelope%20srsName%3D%22urn:ogc:def:crs:EPSG::4326%22%3E%3Cgml:lowerCorner%3E48.5%201.5%3C%2Fgml:lowerCorner%3E%3Cgml:upperCorner%3E49.5%202.5%3C%2Fgml:upperCorner%3E%3C%2Fgml:Envelope%3E%3C%2FIntersects%3E%3CIntersects%3E%3CPropertyName%3Eshape%3C%2FPropertyName%3E%3Cgml:Envelope%20srsName%3D%22urn:ogc:def:crs:EPSG::4326%22%3E%3Cgml:lowerCorner%3E48.5%201.5%3C%2Fgml:lowerCorner%3E%3Cgml:upperCorner%3E49.5%202.5%3C%2Fgml:upperCorner%3E%3C%2Fgml:Envelope%3E%3C%2FIntersects%3E%3C%2FOr%3E%3CIntersects%3E%3CPropertyName%3Eshape%3C%2FPropertyName%3E%3Cgml:Envelope%20srsName%3D%22EPSG:4326%22%3E%3Cgml:lowerCorner%3E1.5%2048.5%3C%2Fgml:lowerCorner%3E%3Cgml:upperCorner%3E2.5%2049.5%3C%2Fgml:upperCorner%3E%3C%2Fgml:Envelope%3E%3C%2FIntersects%3E%3C%2FOr%3E%3CIntersects%3E%3CPropertyName%3Eshape%3C%2FPropertyName%3E%3Cgml:Envelope%20srsName%3D%22urn:ogc:def:crs:EPSG::32630%22%3E%3Cgml:lowerCorner%3E380000%205370000%3C%2Fgml:lowerCorner%3E%3Cgml:upperCorner%3E470000%205490000%3C%2Fgml:upperCorner%3E%3C%2Fgml:Envelope%3E%3C%2FIntersects%3E%3C%2FOr%3E%3C%2FFilter%3E',
content)
lyr.SetAttributeFilter("ST_Intersects(shape, ST_MakeEnvelope(1.5,48.5,2.5,49.5)) OR " + \
"ST_Intersects(shape, ST_MakeEnvelope(1.5,48.5,2.5,49.5, 4326)) OR " + \
"ST_Intersects(shape, ST_MakeEnvelope(1.5,48.5,2.5,49.5, 'EPSG:4326')) OR " + \
"ST_Intersects(shape, ST_MakeEnvelope(380000,5370000,470000,5490000,32630))")
f = lyr.GetNextFeature()
if f is None:
gdaltest.post_reason('fail')
return 'fail'
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=GetFeature&TYPENAME=my_layer&FILTER=%3CFilter%20xmlns%3D%22http:%2F%2Fwww.opengis.net%2Fogc%22%20xmlns:gml%3D%22http:%2F%2Fwww.opengis.net%2Fgml%22%3E%3COr%3E%3COr%3E%3CIntersects%3E%3CPropertyName%3Eshape%3C%2FPropertyName%3E%3Cgml:Polygon%20srsName%3D%22urn:ogc:def:crs:EPSG::4326%22%20gml:id%3D%22id1%22%3E%3Cgml:exterior%3E%3Cgml:LinearRing%3E%3Cgml:posList%3E48.5%201.5%2049.5%202.5%2049.5%202.5%2048.5%202.5%2048.5%201.5%3C%2Fgml:posList%3E%3C%2Fgml:LinearRing%3E%3C%2Fgml:exterior%3E%3C%2Fgml:Polygon%3E%3C%2FIntersects%3E%3CIntersects%3E%3CPropertyName%3Eshape%3C%2FPropertyName%3E%3Cgml:Polygon%20srsName%3D%22urn:ogc:def:crs:EPSG::4326%22%20gml:id%3D%22id2%22%3E%3Cgml:exterior%3E%3Cgml:LinearRing%3E%3Cgml:posList%3E48.5%201.5%2049.5%202.5%2049.5%202.5%2048.5%202.5%2048.5%201.5%3C%2Fgml:posList%3E%3C%2Fgml:LinearRing%3E%3C%2Fgml:exterior%3E%3C%2Fgml:Polygon%3E%3C%2FIntersects%3E%3C%2FOr%3E%3CIntersects%3E%3CPropertyName%3Eshape%3C%2FPropertyName%3E%3Cgml:Polygon%20srsName%3D%22EPSG:4326%22%20gml:id%3D%22id3%22%3E%3Cgml:exterior%3E%3Cgml:LinearRing%3E%3Cgml:posList%3E1.5%2048.5%202.5%2049.5%202.5%2049.5%202.5%2048.5%201.5%2048.5%3C%2Fgml:posList%3E%3C%2Fgml:LinearRing%3E%3C%2Fgml:exterior%3E%3C%2Fgml:Polygon%3E%3C%2FIntersects%3E%3C%2FOr%3E%3C%2FFilter%3E',
content)
lyr.SetAttributeFilter("ST_Intersects(shape, ST_GeomFromText('POLYGON((1.5 48.5,2.5 49.5,2.5 49.5,2.5 48.5,1.5 48.5)))')) OR " + \
"ST_Intersects(shape, ST_GeomFromText('POLYGON((1.5 48.5,2.5 49.5,2.5 49.5,2.5 48.5,1.5 48.5)))', 4326)) OR " + \
"ST_Intersects(shape, ST_GeomFromText('POLYGON((1.5 48.5,2.5 49.5,2.5 49.5,2.5 48.5,1.5 48.5)))', 'EPSG:4326'))")
f = lyr.GetNextFeature()
if f is None:
gdaltest.post_reason('fail')
return 'fail'
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=GetFeature&TYPENAME=my_layer&FILTER=%3CFilter%20xmlns%3D%22http:%2F%2Fwww.opengis.net%2Fogc%22%20xmlns:gml%3D%22http:%2F%2Fwww.opengis.net%2Fgml%22%3E%3CDWithin%3E%3CPropertyName%3Eshape%3C%2FPropertyName%3E%3Cgml:Envelope%20srsName%3D%22urn:ogc:def:crs:EPSG::4326%22%3E%3Cgml:lowerCorner%3E48.5%201.5%3C%2Fgml:lowerCorner%3E%3Cgml:upperCorner%3E49.5%202.5%3C%2Fgml:upperCorner%3E%3C%2Fgml:Envelope%3E%3CDistance%20unit%3D%22m%22%3E5%3C%2FDistance%3E%3C%2FDWithin%3E%3C%2FFilter%3E',
content)
lyr.SetAttributeFilter("ST_DWithin(shape,ST_MakeEnvelope(1.5,48.5,2.5,49.5),5)")
f = lyr.GetNextFeature()
if f is None:
gdaltest.post_reason('fail')
return 'fail'
sql_lyr = ds.ExecuteSQL("SELECT * FROM my_layer WHERE ST_Intersects(shape, ST_GeomFromText('POLYGON((1.5 48.5,2.5 49.5,2.5 49.5,2.5 48.5,1.5 48.5)))')) OR " + \
"ST_Intersects(shape, ST_GeomFromText('POLYGON((1.5 48.5,2.5 49.5,2.5 49.5,2.5 48.5,1.5 48.5)))', 4326)) OR " + \
"ST_Intersects(shape, ST_GeomFromText('POLYGON((1.5 48.5,2.5 49.5,2.5 49.5,2.5 48.5,1.5 48.5)))', 'EPSG:4326'))")
f = sql_lyr.GetNextFeature()
if f is None:
gdaltest.post_reason('fail')
return 'fail'
ds.ReleaseResultSet(sql_lyr)
# Error case
sql_lyr = ds.ExecuteSQL("SELECT ST_Intersects(shape, ST_GeomFromText('POLYGON((1.5 48.5,2.5 49.5,2.5 49.5,2.5 48.5,1.5 48.5))')) FROM my_layer")
gdal.PushErrorHandler()
f = sql_lyr.GetNextFeature()
gdal.PopErrorHandler()
if f is not None:
gdaltest.post_reason('fail')
return 'fail'
ds.ReleaseResultSet(sql_lyr)
return 'success'
###############################################################################
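# Test spatial filter set with SetSpatialFilterRect(), translated to an OGC BBOX filter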
def ogr_wfs_vsimem_wfs110_one_layer_spatial_filter():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
ds = ogr.Open('WFS:/vsimem/wfs_endpoint')
lyr = ds.GetLayer(0)
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=GetFeature&TYPENAME=my_layer&FILTER=%3CFilter%20xmlns%3D%22http:%2F%2Fwww.opengis.net%2Fogc%22%20xmlns:gml%3D%22http:%2F%2Fwww.opengis.net%2Fgml%22%3E%3CBBOX%3E%3CPropertyName%3Eshape%3C%2FPropertyName%3E%3Cgml:Box%3E%3Cgml:coordinates%3E48.0000000000000000,1.0000000000000000%2050.0000000000000000,3.0000000000000000%3C%2Fgml:coordinates%3E%3C%2Fgml:Box%3E%3C%2FBBOX%3E%3C%2FFilter%3E',
"""<wfs:FeatureCollection xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:ogc="http://www.opengis.net/ogc"
xmlns:foo="http://foo"
xmlns:wfs="http://www.opengis.net/wfs"
xmlns:ows="http://www.opengis.net/ows"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:gml="http://www.opengis.net/gml"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
numberOfFeatures="1"
timeStamp="2015-04-17T14:14:24.859Z"
xsi:schemaLocation="http://foo /vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType&TYPENAME=my_layer
http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.1.0/wfs.xsd">
<gml:featureMembers>
<foo:my_layer gml:id="my_layer.1">
<foo:str>str</foo:str>
<foo:boolean>true</foo:boolean>
<foo:short>1</foo:short>
<foo:int>123456789</foo:int>
<foo:float>1.2</foo:float>
<foo:double>1.23</foo:double>
<foo:dt>2015-04-17T12:34:56Z</foo:dt>
<foo:shape>
<gml:Point srsDimension="2" srsName="urn:ogc:def:crs:EPSG::4326">
<gml:pos>49 2</gml:pos>
</gml:Point>
</foo:shape>
</foo:my_layer>
</gml:featureMembers>
</wfs:FeatureCollection>
""")
lyr.SetSpatialFilterRect(1,48,3,50)
f = lyr.GetNextFeature()
if f is None:
gdaltest.post_reason('fail')
return 'fail'
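# Extra checks that only run when streaming is disabled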
if gdal.GetConfigOption('OGR_WFS_USE_STREAMING') == 'NO':
lyr.SetSpatialFilterRect(1.5,48.5,2.5,49.5)
f = lyr.GetNextFeature()
if f is None:
gdaltest.post_reason('fail')
return 'fail'
lyr.SetSpatialFilter(None)
lyr.ResetReading()
lyr.ResetReading()
lyr.SetSpatialFilterRect(1,48,3,50)
f = lyr.GetNextFeature()
if f is None:
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
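# Test combining a spatial filter and an attribute filter (And of PropertyIsEqualTo and BBOX)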
def ogr_wfs_vsimem_wfs110_one_layer_spatial_filter_and_attribute_filter():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
ds = ogr.Open('WFS:/vsimem/wfs_endpoint')
lyr = ds.GetLayer(0)
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=GetFeature&TYPENAME=my_layer&FILTER=%3CFilter%20xmlns%3D%22http:%2F%2Fwww.opengis.net%2Fogc%22%20xmlns:gml%3D%22http:%2F%2Fwww.opengis.net%2Fgml%22%3E%3CAnd%3E%3CPropertyIsEqualTo%3E%3CPropertyName%3Estr%3C%2FPropertyName%3E%3CLiteral%3Estr%3C%2FLiteral%3E%3C%2FPropertyIsEqualTo%3E%3CBBOX%3E%3CPropertyName%3Eshape%3C%2FPropertyName%3E%3Cgml:Box%3E%3Cgml:coordinates%3E48.0000000000000000,1.0000000000000000%2050.0000000000000000,3.0000000000000000%3C%2Fgml:coordinates%3E%3C%2Fgml:Box%3E%3C%2FBBOX%3E%3C%2FAnd%3E%3C%2FFilter%3E',
"""<wfs:FeatureCollection xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:ogc="http://www.opengis.net/ogc"
xmlns:foo="http://foo"
xmlns:wfs="http://www.opengis.net/wfs"
xmlns:ows="http://www.opengis.net/ows"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:gml="http://www.opengis.net/gml"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
numberOfFeatures="1"
timeStamp="2015-04-17T14:14:24.859Z"
xsi:schemaLocation="http://foo /vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType&TYPENAME=my_layer
http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.1.0/wfs.xsd">
<gml:featureMembers>
<foo:my_layer gml:id="my_layer.1">
<foo:str>str</foo:str>
<foo:boolean>true</foo:boolean>
<foo:short>1</foo:short>
<foo:int>123456789</foo:int>
<foo:float>1.2</foo:float>
<foo:double>1.23</foo:double>
<foo:dt>2015-04-17T12:34:56Z</foo:dt>
<foo:shape>
<gml:Point srsDimension="2" srsName="urn:ogc:def:crs:EPSG::4326">
<gml:pos>49 2</gml:pos>
</gml:Point>
</foo:shape>
</foo:my_layer>
</gml:featureMembers>
</wfs:FeatureCollection>
""")
lyr.SetSpatialFilterRect(1,48,3,50)
lyr.SetAttributeFilter("str = 'str'")
f = lyr.GetNextFeature()
if f is None:
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
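# Test CreateFeature() / WFS-T Insert, error handling and transactions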
def ogr_wfs_vsimem_wfs110_insertfeature():
if gdaltest.wfs_drv is None:
return 'skip'
wfs_insert_url = None
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint?SERVICE=WFS&REQUEST=GetCapabilities',
"""<WFS_Capabilities version="1.1.0">
<OperationsMetadata>
<ows:Operation name="Transaction">
<ows:DCP>
<ows:HTTP>
<ows:Get xlink:href="/vsimem/wfs_endpoint"/>
<ows:Post xlink:href="/vsimem/wfs_endpoint"/>
</ows:HTTP>
</ows:DCP>
</ows:Operation>
</OperationsMetadata>
<FeatureTypeList>
<FeatureType>
<Name>my_layer</Name>
<DefaultSRS>urn:ogc:def:crs:EPSG::4326</DefaultSRS>
<ows:WGS84BoundingBox>
<ows:LowerCorner>-180.0 -90.0</ows:LowerCorner>
<ows:UpperCorner>180.0 90.0</ows:UpperCorner>
</ows:WGS84BoundingBox>
</FeatureType>
</FeatureTypeList>
<ogc:Filter_Capabilities>
<ogc:Spatial_Capabilities>
<ogc:GeometryOperands>
<ogc:GeometryOperand>gml:Envelope</ogc:GeometryOperand>
<ogc:GeometryOperand>gml:Point</ogc:GeometryOperand>
<ogc:GeometryOperand>gml:LineString</ogc:GeometryOperand>
<ogc:GeometryOperand>gml:Polygon</ogc:GeometryOperand>
</ogc:GeometryOperands>
<ogc:SpatialOperators>
<ogc:SpatialOperator name="Disjoint"/>
<ogc:SpatialOperator name="Equals"/>
<ogc:SpatialOperator name="DWithin"/>
<ogc:SpatialOperator name="Beyond"/>
<ogc:SpatialOperator name="Intersects"/>
<ogc:SpatialOperator name="Touches"/>
<ogc:SpatialOperator name="Crosses"/>
<ogc:SpatialOperator name="Within"/>
<ogc:SpatialOperator name="Contains"/>
<ogc:SpatialOperator name="Overlaps"/>
<ogc:SpatialOperator name="BBOX"/>
</ogc:SpatialOperators>
</ogc:Spatial_Capabilities>
<ogc:Scalar_Capabilities>
<ogc:LogicalOperators/>
<ogc:ComparisonOperators>
<ogc:ComparisonOperator>LessThan</ogc:ComparisonOperator>
<ogc:ComparisonOperator>GreaterThan</ogc:ComparisonOperator>
<ogc:ComparisonOperator>LessThanEqualTo</ogc:ComparisonOperator>
<ogc:ComparisonOperator>GreaterThanEqualTo</ogc:ComparisonOperator>
<ogc:ComparisonOperator>EqualTo</ogc:ComparisonOperator>
<ogc:ComparisonOperator>NotEqualTo</ogc:ComparisonOperator>
<ogc:ComparisonOperator>Like</ogc:ComparisonOperator>
<ogc:ComparisonOperator>Between</ogc:ComparisonOperator>
<ogc:ComparisonOperator>NullCheck</ogc:ComparisonOperator>
</ogc:ComparisonOperators>
<ogc:ArithmeticOperators>
<ogc:SimpleArithmetic/>
<ogc:Functions/>
</ogc:ArithmeticOperators>
</ogc:Scalar_Capabilities>
<ogc:Id_Capabilities>
<ogc:FID/>
<ogc:EID/>
</ogc:Id_Capabilities>
</ogc:Filter_Capabilities>
</WFS_Capabilities>
""")
ds = ogr.Open('WFS:/vsimem/wfs_endpoint', update = 1)
lyr = ds.GetLayer(0)
f = ogr.Feature(lyr.GetLayerDefn())
gdal.PushErrorHandler()
ret = lyr.CreateFeature(f)
gdal.PopErrorHandler()
if ret == 0:
gdaltest.post_reason('fail')
return 'fail'
wfs_insert_url = """/vsimem/wfs_endpoint&POSTFIELDS=<?xml version="1.0"?>
<wfs:Transaction xmlns:wfs="http://www.opengis.net/wfs"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
service="WFS" version="1.1.0"
xmlns:gml="http://www.opengis.net/gml"
xmlns:ogc="http://www.opengis.net/ogc"
xsi:schemaLocation="http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.1.0/wfs.xsd http://foo /vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType&TYPENAME=my_layer">
<wfs:Insert>
<feature:my_layer xmlns:feature="http://foo">
</feature:my_layer>
</wfs:Insert>
</wfs:Transaction>
"""
gdal.FileFromMemBuffer(wfs_insert_url, "")
f = ogr.Feature(lyr.GetLayerDefn())
gdal.PushErrorHandler()
ret = lyr.CreateFeature(f)
gdal.PopErrorHandler()
if ret == 0:
gdaltest.post_reason('fail')
return 'fail'
gdal.FileFromMemBuffer(wfs_insert_url, "<invalid_xml")
f = ogr.Feature(lyr.GetLayerDefn())
gdal.PushErrorHandler()
ret = lyr.CreateFeature(f)
gdal.PopErrorHandler()
if ret == 0:
gdaltest.post_reason('fail')
return 'fail'
gdal.FileFromMemBuffer(wfs_insert_url, "<ServiceExceptionReport/>")
f = ogr.Feature(lyr.GetLayerDefn())
gdal.PushErrorHandler()
ret = lyr.CreateFeature(f)
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg().find('Error returned by server') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
gdal.FileFromMemBuffer(wfs_insert_url, "<dummy_xml/>")
f = ogr.Feature(lyr.GetLayerDefn())
gdal.PushErrorHandler()
ret = lyr.CreateFeature(f)
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg().find('Cannot find <TransactionResponse>') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
gdal.FileFromMemBuffer(wfs_insert_url,
"""<TransactionResponse>
</TransactionResponse>
""")
f = ogr.Feature(lyr.GetLayerDefn())
gdal.PushErrorHandler()
ret = lyr.CreateFeature(f)
gdal.PopErrorHandler()
if ret == 0:
gdaltest.post_reason('fail')
return 'fail'
gdal.FileFromMemBuffer(wfs_insert_url,
"""<TransactionResponse>
<InsertResults>
<Feature>
<FeatureId/>
</Feature>
</InsertResults>
</TransactionResponse>
""")
f = ogr.Feature(lyr.GetLayerDefn())
gdal.PushErrorHandler()
ret = lyr.CreateFeature(f)
gdal.PopErrorHandler()
if ret == 0:
gdaltest.post_reason('fail')
return 'fail'
gdal.FileFromMemBuffer(wfs_insert_url,
"""<TransactionResponse>
<InsertResults>
<Feature>
<FeatureId fid="my_layer.100"/>
</Feature>
</InsertResults>
</TransactionResponse>
""")
gdal.PushErrorHandler()
sql_lyr = ds.ExecuteSQL('SELECT _LAST_INSERTED_FIDS_ FROM not_existing_layer')
gdal.PopErrorHandler()
if sql_lyr is not None:
gdaltest.post_reason('fail')
return 'fail'
f = ogr.Feature(lyr.GetLayerDefn())
ret = lyr.CreateFeature(f)
if ret != 0:
gdaltest.post_reason('fail')
return 'fail'
if f.GetFID() != 100:
gdaltest.post_reason('fail')
return 'fail'
sql_lyr = ds.ExecuteSQL('SELECT _LAST_INSERTED_FIDS_ FROM my_layer')
got_f = sql_lyr.GetNextFeature()
if got_f is not None:
gdaltest.post_reason('fail')
return 'fail'
ds.ReleaseResultSet(sql_lyr)
gdal.PushErrorHandler()
ret = lyr.CreateFeature(f)
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg().find('Cannot insert a feature when gml_id field is already set') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
# Empty StartTransaction + CommitTransaction
ret = lyr.StartTransaction()
if ret != 0:
gdaltest.post_reason('fail')
return 'fail'
ret = lyr.CommitTransaction()
if ret != 0:
gdaltest.post_reason('fail')
return 'fail'
# Empty StartTransaction + RollbackTransaction
ret = lyr.StartTransaction()
if ret != 0:
gdaltest.post_reason('fail')
return 'fail'
ret = lyr.RollbackTransaction()
if ret != 0:
gdaltest.post_reason('fail')
return 'fail'
# Isolated CommitTransaction
gdal.PushErrorHandler()
ret = lyr.CommitTransaction()
gdal.PopErrorHandler()
if ret == 0:
gdaltest.post_reason('fail')
return 'fail'
# Isolated RollbackTransaction
gdal.PushErrorHandler()
ret = lyr.RollbackTransaction()
gdal.PopErrorHandler()
if ret == 0:
gdaltest.post_reason('fail')
return 'fail'
# 2 StartTransaction in a row
ret = lyr.StartTransaction()
if ret != 0:
gdaltest.post_reason('fail')
return 'fail'
gdal.PushErrorHandler()
ret = lyr.StartTransaction()
gdal.PopErrorHandler()
if ret == 0:
gdaltest.post_reason('fail')
return 'fail'
ret = lyr.RollbackTransaction()
if ret != 0:
gdaltest.post_reason('fail')
return 'fail'
# Missing TransactionSummary
ret = lyr.StartTransaction()
if ret != 0:
gdaltest.post_reason('fail')
return 'fail'
f = ogr.Feature(lyr.GetLayerDefn())
ret = lyr.CreateFeature(f)
if ret != 0:
gdaltest.post_reason('fail')
return 'fail'
gdal.PushErrorHandler()
ret = lyr.CommitTransaction()
gdal.PopErrorHandler()
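# Note: 'where expected' (sic) matches the driver's error message verbatim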
if ret == 0 or gdal.GetLastErrorMsg().find('Only 0 features were inserted whereas 1 where expected') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
ret = lyr.StartTransaction()
if ret != 0:
gdaltest.post_reason('fail')
return 'fail'
f = ogr.Feature(lyr.GetLayerDefn())
ret = lyr.CreateFeature(f)
if ret != 0:
gdaltest.post_reason('fail')
return 'fail'
gdal.FileFromMemBuffer(wfs_insert_url, "<invalid_xml")
gdal.PushErrorHandler()
ret = lyr.CommitTransaction()
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg().find('Invalid XML content') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
ret = lyr.StartTransaction()
if ret != 0:
gdaltest.post_reason('fail')
return 'fail'
f = ogr.Feature(lyr.GetLayerDefn())
ret = lyr.CreateFeature(f)
if ret != 0:
gdaltest.post_reason('fail')
return 'fail'
gdal.FileFromMemBuffer(wfs_insert_url, "<dummy_xml/>")
gdal.PushErrorHandler()
ret = lyr.CommitTransaction()
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg().find('Cannot find <TransactionResponse>') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
ret = lyr.StartTransaction()
if ret != 0:
gdaltest.post_reason('fail')
return 'fail'
f = ogr.Feature(lyr.GetLayerDefn())
ret = lyr.CreateFeature(f)
if ret != 0:
gdaltest.post_reason('fail')
return 'fail'
gdal.FileFromMemBuffer(wfs_insert_url, "<ServiceExceptionReport/>")
gdal.PushErrorHandler()
ret = lyr.CommitTransaction()
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg().find('Error returned by server') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
ret = lyr.StartTransaction()
if ret != 0:
gdaltest.post_reason('fail')
return 'fail'
f = ogr.Feature(lyr.GetLayerDefn())
ret = lyr.CreateFeature(f)
if ret != 0:
gdaltest.post_reason('fail')
return 'fail'
gdal.FileFromMemBuffer(wfs_insert_url,
"""<TransactionResponse>
<TransactionSummary totalInserted="1"/>
</TransactionResponse>
""")
gdal.PushErrorHandler()
ret = lyr.CommitTransaction()
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg().find('Cannot find node InsertResults') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
ret = lyr.StartTransaction()
if ret != 0:
gdaltest.post_reason('fail')
return 'fail'
f = ogr.Feature(lyr.GetLayerDefn())
ret = lyr.CreateFeature(f)
if ret != 0:
gdaltest.post_reason('fail')
return 'fail'
gdal.FileFromMemBuffer(wfs_insert_url,
"""<TransactionResponse>
<TransactionSummary totalInserted="1"/>
<InsertResults/>
</TransactionResponse>
""")
gdal.PushErrorHandler()
ret = lyr.CommitTransaction()
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg().find('Inconsistent InsertResults: did not get expected FID count') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
ret = lyr.StartTransaction()
if ret != 0:
gdaltest.post_reason('fail')
return 'fail'
f = ogr.Feature(lyr.GetLayerDefn())
ret = lyr.CreateFeature(f)
if ret != 0:
gdaltest.post_reason('fail')
return 'fail'
gdal.FileFromMemBuffer(wfs_insert_url,
"""<TransactionResponse>
<TransactionSummary totalInserted="1"/>
<InsertResults>
<Feature>
</Feature>
</InsertResults>
</TransactionResponse>
""")
gdal.PushErrorHandler()
ret = lyr.CommitTransaction()
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg().find('Cannot find fid') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
ret = lyr.StartTransaction()
if ret != 0:
gdaltest.post_reason('fail')
return 'fail'
f = ogr.Feature(lyr.GetLayerDefn())
ret = lyr.CreateFeature(f)
if ret != 0:
gdaltest.post_reason('fail')
return 'fail'
gdal.FileFromMemBuffer(wfs_insert_url,
"""<TransactionResponse>
<TransactionSummary totalInserted="1"/>
<InsertResults>
<Feature>
<FeatureId fid="my_layer.100"/>
</Feature>
</InsertResults>
</TransactionResponse>
""")
ret = lyr.CommitTransaction()
if ret != 0:
gdaltest.post_reason('fail')
return 'fail'
sql_lyr = ds.ExecuteSQL('SELECT _LAST_INSERTED_FIDS_ FROM my_layer')
f = sql_lyr.GetNextFeature()
if f.gml_id != 'my_layer.100':
gdaltest.post_reason('fail')
return 'fail'
sql_lyr.ResetReading()
sql_lyr.SetNextByIndex(0)
sql_lyr.GetFeature(0)
sql_lyr.GetLayerDefn()
sql_lyr.GetFeatureCount()
sql_lyr.TestCapability('foo')
ds.ReleaseResultSet(sql_lyr)
gdal.Unlink(wfs_insert_url)
wfs_insert_url = None
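# Insert with field values and a geometry set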
wfs_insert_url = """/vsimem/wfs_endpoint&POSTFIELDS=<?xml version="1.0"?>
<wfs:Transaction xmlns:wfs="http://www.opengis.net/wfs"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
service="WFS" version="1.1.0"
xmlns:gml="http://www.opengis.net/gml"
xmlns:ogc="http://www.opengis.net/ogc"
xsi:schemaLocation="http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.1.0/wfs.xsd http://foo /vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType&TYPENAME=my_layer">
<wfs:Insert>
<feature:my_layer xmlns:feature="http://foo">
<feature:str>foo</feature:str>
<feature:int>123456789</feature:int>
<feature:double>2.34</feature:double>
<feature:shape><gml:Point srsName="urn:ogc:def:crs:EPSG::4326"><gml:pos>49 2</gml:pos></gml:Point></feature:shape>
</feature:my_layer>
</wfs:Insert>
</wfs:Transaction>
"""
gdal.FileFromMemBuffer(wfs_insert_url,
"""<TransactionResponse>
<TransactionSummary totalInserted="1"/>
<InsertResults>
<Feature>
<FeatureId fid="my_layer.100"/>
</Feature>
</InsertResults>
</TransactionResponse>
""")
f = ogr.Feature(lyr.GetLayerDefn())
f.SetField('str', 'foo')
f.SetField('int', 123456789)
f.SetField('double', 2.34)
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT (2 49)'))
ret = lyr.CreateFeature(f)
if ret != 0:
gdaltest.post_reason('fail')
return 'fail'
gdal.Unlink(wfs_insert_url)
wfs_insert_url = None
return 'success'
###############################################################################
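# Test SetFeature() / WFS-T Update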
def ogr_wfs_vsimem_wfs110_updatefeature():
if gdaltest.wfs_drv is None:
return 'skip'
wfs_update_url = None
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
ds = ogr.Open('WFS:/vsimem/wfs_endpoint', update = 1)
lyr = ds.GetLayer(0)
f = ogr.Feature(lyr.GetLayerDefn())
gdal.PushErrorHandler()
ret = lyr.CreateFeature(f)
gdal.PopErrorHandler()
if ret == 0:
gdaltest.post_reason('fail')
return 'fail'
f = ogr.Feature(lyr.GetLayerDefn())
gdal.PushErrorHandler()
ret = lyr.SetFeature(f)
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg().find('Cannot update a feature when gml_id field is not set') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
f = ogr.Feature(lyr.GetLayerDefn())
f.SetField('gml_id', 'my_layer.1')
gdal.PushErrorHandler()
ret = lyr.SetFeature(f)
gdal.PopErrorHandler()
if ret == 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
wfs_update_url = """/vsimem/wfs_endpoint&POSTFIELDS=<?xml version="1.0"?>
<wfs:Transaction xmlns:wfs="http://www.opengis.net/wfs"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
service="WFS" version="1.1.0"
xmlns:gml="http://www.opengis.net/gml"
xmlns:ogc="http://www.opengis.net/ogc"
xsi:schemaLocation="http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.1.0/wfs.xsd http://foo /vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType&TYPENAME=my_layer">
<wfs:Update typeName="feature:my_layer" xmlns:feature="http://foo">
<wfs:Property>
<wfs:Name>shape</wfs:Name>
</wfs:Property>
<wfs:Property>
<wfs:Name>str</wfs:Name>
</wfs:Property>
<wfs:Property>
<wfs:Name>boolean</wfs:Name>
</wfs:Property>
<wfs:Property>
<wfs:Name>short</wfs:Name>
</wfs:Property>
<wfs:Property>
<wfs:Name>int</wfs:Name>
</wfs:Property>
<wfs:Property>
<wfs:Name>float</wfs:Name>
</wfs:Property>
<wfs:Property>
<wfs:Name>double</wfs:Name>
</wfs:Property>
<wfs:Property>
<wfs:Name>dt</wfs:Name>
</wfs:Property>
<ogc:Filter>
<ogc:GmlObjectId gml:id="my_layer.1"/>
</ogc:Filter>
</wfs:Update>
</wfs:Transaction>
"""
gdal.FileFromMemBuffer(wfs_update_url, "")
f = ogr.Feature(lyr.GetLayerDefn())
f.SetField('gml_id', 'my_layer.1')
gdal.PushErrorHandler()
ret = lyr.SetFeature(f)
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg().find('Empty content returned by server') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
gdal.FileFromMemBuffer(wfs_update_url, "<invalid_xmm")
f = ogr.Feature(lyr.GetLayerDefn())
f.SetField('gml_id', 'my_layer.1')
gdal.PushErrorHandler()
ret = lyr.SetFeature(f)
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg().find('Invalid XML content') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
gdal.FileFromMemBuffer(wfs_update_url, "<ServiceExceptionReport/>")
f = ogr.Feature(lyr.GetLayerDefn())
f.SetField('gml_id', 'my_layer.1')
gdal.PushErrorHandler()
ret = lyr.SetFeature(f)
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg().find('Error returned by server') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
gdal.FileFromMemBuffer(wfs_update_url, "<foo/>")
f = ogr.Feature(lyr.GetLayerDefn())
f.SetField('gml_id', 'my_layer.1')
gdal.PushErrorHandler()
ret = lyr.SetFeature(f)
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg().find('Cannot find <TransactionResponse>') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
gdal.FileFromMemBuffer(wfs_update_url, "<TransactionResponse/>")
f = ogr.Feature(lyr.GetLayerDefn())
f.SetField('gml_id', 'my_layer.1')
ret = lyr.SetFeature(f)
if ret != 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
gdal.Unlink(wfs_update_url)
wfs_update_url = """/vsimem/wfs_endpoint&POSTFIELDS=<?xml version="1.0"?>
<wfs:Transaction xmlns:wfs="http://www.opengis.net/wfs"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
service="WFS" version="1.1.0"
xmlns:gml="http://www.opengis.net/gml"
xmlns:ogc="http://www.opengis.net/ogc"
xsi:schemaLocation="http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.1.0/wfs.xsd http://foo /vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType&TYPENAME=my_layer">
<wfs:Update typeName="feature:my_layer" xmlns:feature="http://foo">
<wfs:Property>
<wfs:Name>shape</wfs:Name>
<wfs:Value><gml:Point srsName="urn:ogc:def:crs:EPSG::4326"><gml:pos>49 2</gml:pos></gml:Point></wfs:Value>
</wfs:Property>
<wfs:Property>
<wfs:Name>str</wfs:Name>
<wfs:Value>foo</wfs:Value>
</wfs:Property>
<wfs:Property>
<wfs:Name>boolean</wfs:Name>
</wfs:Property>
<wfs:Property>
<wfs:Name>short</wfs:Name>
</wfs:Property>
<wfs:Property>
<wfs:Name>int</wfs:Name>
<wfs:Value>123456789</wfs:Value>
</wfs:Property>
<wfs:Property>
<wfs:Name>float</wfs:Name>
</wfs:Property>
<wfs:Property>
<wfs:Name>double</wfs:Name>
<wfs:Value>2.34</wfs:Value>
</wfs:Property>
<wfs:Property>
<wfs:Name>dt</wfs:Name>
</wfs:Property>
<ogc:Filter>
<ogc:GmlObjectId gml:id="my_layer.1"/>
</ogc:Filter>
</wfs:Update>
</wfs:Transaction>
"""
gdal.FileFromMemBuffer(wfs_update_url, "<TransactionResponse/>")
f = ogr.Feature(lyr.GetLayerDefn())
f.SetField('gml_id', 'my_layer.1')
f.SetField('str', 'foo')
f.SetField('int', 123456789)
f.SetField('double', 2.34)
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT (2 49)'))
ret = lyr.SetFeature(f)
if ret != 0:
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
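# Test DeleteFeature() / WFS-T Delete and the DELETE FROM SQL special command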
def ogr_wfs_vsimem_wfs110_deletefeature():
if gdaltest.wfs_drv is None:
return 'skip'
wfs_delete_url = None
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
ds = ogr.Open('WFS:/vsimem/wfs_endpoint', update = 1)
lyr = ds.GetLayer(0)
gdal.PushErrorHandler()
ret = lyr.DeleteFeature(200)
gdal.PopErrorHandler()
if ret == 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
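# GetFeature response used to translate FID 200 into gml_id 'my_layer.200'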
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=GetFeature&TYPENAME=my_layer&FILTER=%3CFilter%20xmlns%3D%22http:%2F%2Fwww.opengis.net%2Fogc%22%20xmlns:gml%3D%22http:%2F%2Fwww.opengis.net%2Fgml%22%3E%3CGmlObjectId%20id%3D%22my_layer.200%22%2F%3E%3C%2FFilter%3E',
"""<wfs:FeatureCollection xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:ogc="http://www.opengis.net/ogc"
xmlns:foo="http://foo"
xmlns:wfs="http://www.opengis.net/wfs"
xmlns:ows="http://www.opengis.net/ows"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:gml="http://www.opengis.net/gml"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
numberOfFeatures="1"
timeStamp="2015-04-17T14:14:24.859Z"
xsi:schemaLocation="http://foo /vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType&TYPENAME=my_layer
http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.1.0/wfs.xsd">
<gml:featureMembers>
<foo:my_layer gml:id="my_layer.200">
</foo:my_layer>
</gml:featureMembers>
</wfs:FeatureCollection>
""")
ds = ogr.Open('WFS:/vsimem/wfs_endpoint', update = 1)
lyr = ds.GetLayer(0)
gdal.PushErrorHandler()
ret = lyr.DeleteFeature(200)
gdal.PopErrorHandler()
if ret == 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
ds = ogr.Open('WFS:/vsimem/wfs_endpoint', update = 1)
lyr = ds.GetLayer(0)
wfs_delete_url = """/vsimem/wfs_endpoint&POSTFIELDS=<?xml version="1.0"?>
<wfs:Transaction xmlns:wfs="http://www.opengis.net/wfs"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
service="WFS" version="1.1.0"
xmlns:gml="http://www.opengis.net/gml"
xmlns:ogc="http://www.opengis.net/ogc"
xsi:schemaLocation="http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.1.0/wfs.xsd http://foo /vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType&TYPENAME=my_layer">
<wfs:Delete xmlns:feature="http://foo" typeName="feature:my_layer">
<ogc:Filter>
<ogc:FeatureId fid="my_layer.200"/>
</ogc:Filter>
</wfs:Delete>
</wfs:Transaction>
"""
gdal.FileFromMemBuffer(wfs_delete_url, "")
gdal.PushErrorHandler()
ret = lyr.DeleteFeature(200)
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg().find('Empty content returned by server') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
ds = ogr.Open('WFS:/vsimem/wfs_endpoint', update = 1)
lyr = ds.GetLayer(0)
gdal.FileFromMemBuffer(wfs_delete_url, "<invalid_xml>")
gdal.PushErrorHandler()
ret = lyr.DeleteFeature(200)
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg().find('Invalid XML content') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
ds = ogr.Open('WFS:/vsimem/wfs_endpoint', update = 1)
lyr = ds.GetLayer(0)
gdal.FileFromMemBuffer(wfs_delete_url, "<foo/>")
gdal.PushErrorHandler()
ret = lyr.DeleteFeature(200)
gdal.PopErrorHandler()
if ret == 0 or gdal.GetLastErrorMsg().find('Cannot find <TransactionResponse>') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
ds = ogr.Open('WFS:/vsimem/wfs_endpoint', update = 1)
lyr = ds.GetLayer(0)
gdal.FileFromMemBuffer(wfs_delete_url, "<TransactionResponse/>")
ret = lyr.DeleteFeature(200)
if ret != 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
gdal.Unlink(wfs_delete_url)
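# Transaction issued by the DELETE FROM SQL statement below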
wfs_delete_url = """/vsimem/wfs_endpoint&POSTFIELDS=<?xml version="1.0"?>
<wfs:Transaction xmlns:wfs="http://www.opengis.net/wfs"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
service="WFS" version="1.1.0"
xmlns:gml="http://www.opengis.net/gml"
xmlns:ogc="http://www.opengis.net/ogc"
xsi:schemaLocation="http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.1.0/wfs.xsd http://foo /vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType&TYPENAME=my_layer">
<wfs:Delete xmlns:feature="http://foo" typeName="feature:my_layer">
<ogc:Filter>
<GmlObjectId id="my_layer.200"/> </ogc:Filter>
</wfs:Delete>
</wfs:Transaction>
"""
gdal.FileFromMemBuffer(wfs_delete_url, "<TransactionResponse/>")
gdal.ErrorReset()
sql_lyr = ds.ExecuteSQL("DELETE FROM my_layer WHERE gml_id = 'my_layer.200'")
if gdal.GetLastErrorMsg() != '':
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
gdal.ErrorReset()
gdal.PushErrorHandler()
sql_lyr = ds.ExecuteSQL("DELETE FROM ")
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
gdal.ErrorReset()
gdal.PushErrorHandler()
sql_lyr = ds.ExecuteSQL("DELETE FROM non_existing_layer WHERE truc")
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg().find('Unknown layer') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
gdal.ErrorReset()
gdal.PushErrorHandler()
sql_lyr = ds.ExecuteSQL("DELETE FROM my_layer BLA")
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg().find('WHERE clause missing') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
gdal.ErrorReset()
gdal.PushErrorHandler()
sql_lyr = ds.ExecuteSQL("DELETE FROM my_layer WHERE -")
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg().find('SQL Expression Parsing Error') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
gdal.ErrorReset()
gdal.PushErrorHandler()
sql_lyr = ds.ExecuteSQL("DELETE FROM my_layer WHERE ogr_geometry = 'POINT'")
gdal.PopErrorHandler()
if sql_lyr is not None or gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
return 'success'
###############################################################################
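# Test a DescribeFeatureType response with an unknown field type: the driver
# falls back to deducing the layer schema from a GetFeature request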
def ogr_wfs_vsimem_wfs110_schema_not_understood():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
# Invalid GetCapabilities response, but enough for our purposes
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint_schema_not_understood?SERVICE=WFS&REQUEST=GetCapabilities',
"""<WFS_Capabilities version="1.1.0">
<FeatureTypeList>
<FeatureType/>
<FeatureType>
<Name>my_layer</Name>
</FeatureType>
</FeatureTypeList>
</WFS_Capabilities>
""")
ds = ogr.Open('WFS:/vsimem/wfs_endpoint_schema_not_understood')
lyr = ds.GetLayer(0)
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint_schema_not_understood?SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType&TYPENAME=my_layer',
"""<xsd:schema xmlns:foo="http://foo" xmlns:gml="http://www.opengis.net/gml" xmlns:xsd="http://www.w3.org/2001/XMLSchema" elementFormDefault="qualified" targetNamespace="http://foo">
<xsd:import namespace="http://www.opengis.net/gml" schemaLocation="http://foo/schemas/gml/3.1.1/base/gml.xsd"/>
<xsd:complexType name="my_layerType">
<xsd:complexContent>
<xsd:extension base="gml:AbstractFeatureType">
<xsd:sequence>
<xsd:element maxOccurs="1" minOccurs="0" name="str" nillable="true" type="SOME_TYPE_I_DONT_UNDERSTAND"/>
<xsd:element maxOccurs="1" minOccurs="0" name="boolean" nillable="true" type="xsd:boolean"/>
<xsd:element maxOccurs="1" minOccurs="0" name="short" nillable="true" type="xsd:short"/>
<xsd:element maxOccurs="1" minOccurs="0" name="int" nillable="true" type="xsd:int"/>
<xsd:element maxOccurs="1" minOccurs="0" name="float" nillable="true" type="xsd:float"/>
<xsd:element maxOccurs="1" minOccurs="0" name="double" nillable="true" type="xsd:double"/>
<xsd:element maxOccurs="1" minOccurs="0" name="dt" nillable="true" type="xsd:dateTime"/>
<xsd:element maxOccurs="1" minOccurs="0" name="shape" nillable="true" type="gml:PointPropertyType"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:element name="my_layer" substitutionGroup="gml:_Feature" type="foo:my_layerType"/>
</xsd:schema>
""")
gdal.PushErrorHandler()
lyr_defn = lyr.GetLayerDefn()
gdal.PopErrorHandler()
if lyr_defn.GetFieldCount() != 0:
gdaltest.post_reason('fail')
return 'fail'
ds = ogr.Open('WFS:/vsimem/wfs_endpoint_schema_not_understood')
lyr = ds.GetLayer(0)
content = \
"""<wfs:FeatureCollection xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:ogc="http://www.opengis.net/ogc"
xmlns:foo="http://foo"
xmlns:wfs="http://www.opengis.net/wfs"
xmlns:ows="http://www.opengis.net/ows"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:gml="http://www.opengis.net/gml"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
numberOfFeatures="1"
timeStamp="2015-04-17T14:14:24.859Z"
xsi:schemaLocation="http://foo /vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType&TYPENAME=my_layer
http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.1.0/wfs.xsd">
<gml:featureMembers>
<foo:my_layer gml:id="my_layer.1">
<foo:str>str</foo:str>
<foo:boolean>true</foo:boolean>
<foo:short>1</foo:short>
<foo:int>123456789</foo:int>
<foo:float>1.2</foo:float>
<foo:double>1.23</foo:double>
<foo:dt>2015-04-17T12:34:56Z</foo:dt>
<foo:shape>
<gml:Point srsDimension="2" srsName="urn:ogc:def:crs:EPSG::4326">
<gml:pos>49 2</gml:pos>
</gml:Point>
</foo:shape>
</foo:my_layer>
</gml:featureMembers>
</wfs:FeatureCollection>
"""
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint_schema_not_understood?SERVICE=WFS&VERSION=1.1.0&REQUEST=GetFeature&TYPENAME=my_layer&MAXFEATURES=1', content)
lyr_defn = lyr.GetLayerDefn()
if lyr_defn.GetFieldCount() != 8:
gdaltest.post_reason('fail')
return 'fail'
gdal.FileFromMemBuffer('/vsimem/wfs_endpoint_schema_not_understood?SERVICE=WFS&VERSION=1.1.0&REQUEST=GetFeature&TYPENAME=my_layer', content)
f = lyr.GetNextFeature()
if f.gml_id != 'my_layer.1' or f.boolean != 1 or f.str != 'str' or f.short != 1 or \
f.int != 123456789 or f.float != 1.2 or f.double != 1.23 or f.dt != '2015-04-17T12:34:56Z' or \
f.GetGeometryRef().ExportToWkt() != 'POINT (2 49)':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
return 'success'
###############################################################################
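# Test multiple layers advertised in GetCapabilities, with a joint DescribeFeatureType request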
def ogr_wfs_vsimem_wfs110_multiple_layers():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.FileFromMemBuffer('/vsimem/wfs110_multiple_layers?SERVICE=WFS&REQUEST=GetCapabilities',
"""<WFS_Capabilities version="1.1.0">
<FeatureTypeList>
<FeatureType>
<Name>my_layer</Name>
<DefaultSRS>urn:ogc:def:crs:EPSG::4326</DefaultSRS>
<ows:WGS84BoundingBox>
<ows:LowerCorner>-180.0 -90.0</ows:LowerCorner>
<ows:UpperCorner>180.0 90.0</ows:UpperCorner>
</ows:WGS84BoundingBox>
</FeatureType>
<FeatureType>
<Name>my_layer2</Name>
<DefaultSRS>urn:ogc:def:crs:EPSG::4326</DefaultSRS>
<ows:WGS84BoundingBox>
<ows:LowerCorner>-180.0 -90.0</ows:LowerCorner>
<ows:UpperCorner>180.0 90.0</ows:UpperCorner>
</ows:WGS84BoundingBox>
</FeatureType>
</FeatureTypeList>
</WFS_Capabilities>
""")
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
ds = ogr.Open('WFS:/vsimem/wfs110_multiple_layers')
lyr = ds.GetLayer(0)
gdal.PushErrorHandler()
lyr_defn = lyr.GetLayerDefn()
gdal.PopErrorHandler()
if lyr_defn.GetFieldCount() != 0:
gdaltest.post_reason('fail')
return 'fail'
ds = ogr.Open('WFS:/vsimem/wfs110_multiple_layers')
lyr = ds.GetLayer(0)
gdal.FileFromMemBuffer('/vsimem/wfs110_multiple_layers?SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType&TYPENAME=my_layer,my_layer2',
"<ServiceExceptionReport/>")
lyr = ds.GetLayer(0)
gdal.PushErrorHandler()
lyr_defn = lyr.GetLayerDefn()
gdal.PopErrorHandler()
if lyr_defn.GetFieldCount() != 0:
gdaltest.post_reason('fail')
return 'fail'
ds = ogr.Open('WFS:/vsimem/wfs110_multiple_layers')
lyr = ds.GetLayer(0)
gdal.FileFromMemBuffer('/vsimem/wfs110_multiple_layers?SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType&TYPENAME=my_layer,my_layer2',
"<invalid_xml")
lyr = ds.GetLayer(0)
gdal.PushErrorHandler()
lyr_defn = lyr.GetLayerDefn()
gdal.PopErrorHandler()
if lyr_defn.GetFieldCount() != 0:
gdaltest.post_reason('fail')
return 'fail'
ds = ogr.Open('WFS:/vsimem/wfs110_multiple_layers')
lyr = ds.GetLayer(0)
gdal.FileFromMemBuffer('/vsimem/wfs110_multiple_layers?SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType&TYPENAME=my_layer,my_layer2',
"<no_schema/>")
lyr = ds.GetLayer(0)
gdal.PushErrorHandler()
lyr_defn = lyr.GetLayerDefn()
gdal.PopErrorHandler()
if lyr_defn.GetFieldCount() != 0:
gdaltest.post_reason('fail')
return 'fail'
ds = ogr.Open('WFS:/vsimem/wfs110_multiple_layers')
lyr = ds.GetLayer(0)
gdal.FileFromMemBuffer('/vsimem/wfs110_multiple_layers?SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType&TYPENAME=my_layer,my_layer2',
"""<xsd:schema xmlns:foo="http://foo" xmlns:gml="http://www.opengis.net/gml" xmlns:xsd="http://www.w3.org/2001/XMLSchema" elementFormDefault="qualified" targetNamespace="http://foo">
<xsd:import namespace="http://www.opengis.net/gml" schemaLocation="http://foo/schemas/gml/3.2.1/base/gml.xsd"/>
<xsd:complexType name="my_layerType">
<xsd:complexContent>
<xsd:extension base="gml:AbstractFeatureType">
<xsd:sequence>
<xsd:element maxOccurs="1" minOccurs="0" name="str" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:element name="my_layer" substitutionGroup="gml:_Feature" type="foo:my_layerType"/>
<xsd:complexType name="my_layer2Type">
<xsd:complexContent>
<xsd:extension base="gml:AbstractFeatureType">
<xsd:sequence>
<xsd:element maxOccurs="1" minOccurs="0" name="str" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:element name="my_layer2" substitutionGroup="gml:_Feature" type="foo:my_layer2Type"/>
</xsd:schema>
""")
lyr = ds.GetLayer(0)
lyr_defn = lyr.GetLayerDefn()
if lyr_defn.GetFieldCount() != 2:
gdaltest.post_reason('fail')
return 'fail'
lyr = ds.GetLayer(1)
lyr_defn = lyr.GetLayerDefn()
if lyr_defn.GetFieldCount() != 2:
gdaltest.post_reason('fail')
return 'fail'
ds = ogr.Open('WFS:/vsimem/wfs110_multiple_layers')
lyr = ds.GetLayer(0)
gdal.FileFromMemBuffer('/vsimem/wfs110_multiple_layers?SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType&TYPENAME=my_layer,my_layer2',
"""<xsd:schema xmlns:foo="http://foo" xmlns:gml="http://www.opengis.net/gml" xmlns:xsd="http://www.w3.org/2001/XMLSchema" elementFormDefault="qualified" targetNamespace="http://foo">
<xsd:import namespace="http://www.opengis.net/gml" schemaLocation="http://foo/schemas/gml/3.2.1/base/gml.xsd"/>
<xsd:complexType name="my_layerType">
<xsd:complexContent>
<xsd:extension base="gml:AbstractFeatureType">
<xsd:sequence>
<xsd:element maxOccurs="1" minOccurs="0" name="str" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:element name="my_layer" substitutionGroup="gml:_Feature" type="foo:my_layerType"/>
</xsd:schema>
""")
lyr = ds.GetLayer(0)
lyr_defn = lyr.GetLayerDefn()
if lyr_defn.GetFieldCount() != 2:
gdaltest.post_reason('fail')
return 'fail'
gdal.FileFromMemBuffer('/vsimem/wfs110_multiple_layers?SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType&TYPENAME=my_layer2',
"""<xsd:schema xmlns:foo="http://foo" xmlns:gml="http://www.opengis.net/gml" xmlns:xsd="http://www.w3.org/2001/XMLSchema" elementFormDefault="qualified" targetNamespace="http://foo">
<xsd:import namespace="http://www.opengis.net/gml" schemaLocation="http://foo/schemas/gml/3.2.1/base/gml.xsd"/>
<xsd:complexType name="my_layer2Type">
<xsd:complexContent>
<xsd:extension base="gml:AbstractFeatureType">
<xsd:sequence>
<xsd:element maxOccurs="1" minOccurs="0" name="str" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:element name="my_layer2" substitutionGroup="gml:_Feature" type="foo:my_layer2Type"/>
</xsd:schema>
""")
lyr = ds.GetLayer(1)
lyr_defn = lyr.GetLayerDefn()
if lyr_defn.GetFieldCount() != 2:
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
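# Test layers with the same name in different namespaces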
def ogr_wfs_vsimem_wfs110_multiple_layers_same_name_different_ns():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.FileFromMemBuffer('/vsimem/wfs110_multiple_layers_different_ns?SERVICE=WFS&REQUEST=GetCapabilities',
"""<WFS_Capabilities version="1.1.0">
<FeatureTypeList>
<FeatureType>
<Name>ns1:my_layer</Name>
<DefaultSRS>urn:ogc:def:crs:EPSG::4326</DefaultSRS>
<ows:WGS84BoundingBox>
<ows:LowerCorner>-180.0 -90.0</ows:LowerCorner>
<ows:UpperCorner>180.0 90.0</ows:UpperCorner>
</ows:WGS84BoundingBox>
</FeatureType>
<FeatureType>
<Name>ns2:my_layer</Name>
<DefaultSRS>urn:ogc:def:crs:EPSG::4326</DefaultSRS>
<ows:WGS84BoundingBox>
<ows:LowerCorner>-180.0 -90.0</ows:LowerCorner>
<ows:UpperCorner>180.0 90.0</ows:UpperCorner>
</ows:WGS84BoundingBox>
</FeatureType>
</FeatureTypeList>
</WFS_Capabilities>
""")
gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
ds = ogr.Open('WFS:/vsimem/wfs110_multiple_layers_different_ns')
lyr = ds.GetLayer(0)
gdal.FileFromMemBuffer('/vsimem/wfs110_multiple_layers_different_ns?SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType&TYPENAME=ns1:my_layer',
"""<xsd:schema xmlns:ns1="http://ns1" xmlns:ns2="http://ns2" xmlns:gml="http://www.opengis.net/gml" xmlns:xsd="http://www.w3.org/2001/XMLSchema" elementFormDefault="qualified" targetNamespace="http://foo">
<xsd:import namespace="http://www.opengis.net/gml" schemaLocation="http://foo/schemas/gml/3.2.1/base/gml.xsd"/>
<xsd:complexType name="my_layerType">
<xsd:complexContent>
<xsd:extension base="gml:AbstractFeatureType">
<xsd:sequence>
<xsd:element maxOccurs="1" minOccurs="0" name="str" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:element name="my_layer" substitutionGroup="gml:_Feature" type="my_layerType"/>
</xsd:schema>
""")
lyr = ds.GetLayer(0)
lyr_defn = lyr.GetLayerDefn()
if lyr_defn.GetFieldCount() != 2:
gdaltest.post_reason('fail')
return 'fail'
gdal.FileFromMemBuffer('/vsimem/wfs110_multiple_layers_different_ns?SERVICE=WFS&VERSION=1.1.0&REQUEST=GetFeature&TYPENAME=ns1:my_layer',
"""<wfs:FeatureCollection xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:ogc="http://www.opengis.net/ogc"
xmlns:ns1="http://ns1"
xmlns:wfs="http://www.opengis.net/wfs"
xmlns:ows="http://www.opengis.net/ows"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:gml="http://www.opengis.net/gml"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
numberMatched="unknown" numberReturned="2"
timeStamp="2015-04-17T14:14:24.859Z"
xsi:schemaLocation="http://ns1 /vsimem/wfs_endpoint?SERVICE=WFS&VERSION=2.0.0&REQUEST=DescribeFeatureType&TYPENAME=my_layer
http://www.opengis.net/wfs/2.0 http://schemas.opengis.net/wfs/2.0/wfs.xsd">
<gml:featureMembers>
<ns1:my_layer gml:id="my_layer.1">
</ns1:my_layer>
</gml:featureMembers>
</wfs:FeatureCollection>
""")
f = lyr.GetNextFeature()
if f is None:
gdaltest.post_reason('fail')
return 'fail'
gdal.FileFromMemBuffer('/vsimem/wfs110_multiple_layers_different_ns?SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType&TYPENAME=ns2:my_layer',
"""<xsd:schema xmlns:ns2="http://ns2" xmlns:ns2="http://ns2" xmlns:gml="http://www.opengis.net/gml" xmlns:xsd="http://www.w3.org/2001/XMLSchema" elementFormDefault="qualified" targetNamespace="http://foo">
<xsd:import namespace="http://www.opengis.net/gml" schemaLocation="http://foo/schemas/gml/3.2.1/base/gml.xsd"/>
<xsd:complexType name="my_layerType">
<xsd:complexContent>
<xsd:extension base="gml:AbstractFeatureType">
<xsd:sequence>
<xsd:element maxOccurs="1" minOccurs="0" name="str" nillable="true" type="xsd:string"/>
<xsd:element maxOccurs="1" minOccurs="0" name="str2" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:element name="my_layer" substitutionGroup="gml:_Feature" type="my_layerType"/>
</xsd:schema>
""")
lyr = ds.GetLayer(1)
lyr_defn = lyr.GetLayerDefn()
if lyr_defn.GetFieldCount() != 3:
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
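# Test WFS 2.0 paging (CountDefault and ImplementsResultPaging mapped to STARTINDEX/COUNT)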
def ogr_wfs_vsimem_wfs200_paging():
if gdaltest.wfs_drv is None:
return 'skip'
gdal.FileFromMemBuffer('/vsimem/wfs200_endpoint_paging?SERVICE=WFS&REQUEST=GetCapabilities',
"""<WFS_Capabilities version="2.0.0">
<OperationsMetadata>
<ows:Operation name="GetFeature">
<ows:Constraint name="CountDefault">
<ows:NoValues/>
<ows:DefaultValue>2</ows:DefaultValue>
</ows:Constraint>
</ows:Operation>
<ows:Constraint name="ImplementsResultPaging">
<ows:NoValues/><ows:DefaultValue>TRUE</ows:DefaultValue>
</ows:Constraint>
</OperationsMetadata>
<FeatureTypeList>
<FeatureType>
<Name>my_layer</Name>
<Title>title</Title>
<Abstract>abstract</Abstract>
<Keywords>
<Keyword>keyword</Keyword>
</Keywords>
<DefaultSRS>urn:ogc:def:crs:EPSG::4326</DefaultSRS>
<ows:WGS84BoundingBox>
<ows:LowerCorner>-180.0 -90.0</ows:LowerCorner>
<ows:UpperCorner>180.0 90.0</ows:UpperCorner>
</ows:WGS84BoundingBox>
</FeatureType>
</FeatureTypeList>
<ogc:Filter_Capabilities>
<ogc:Spatial_Capabilities>
<ogc:GeometryOperands>
<ogc:GeometryOperand>gml:Envelope</ogc:GeometryOperand>
<ogc:GeometryOperand>gml:Point</ogc:GeometryOperand>
<ogc:GeometryOperand>gml:LineString</ogc:GeometryOperand>
<ogc:GeometryOperand>gml:Polygon</ogc:GeometryOperand>
</ogc:GeometryOperands>
<ogc:SpatialOperators>
<ogc:SpatialOperator name="Disjoint"/>
<ogc:SpatialOperator name="Equals"/>
<ogc:SpatialOperator name="DWithin"/>
<ogc:SpatialOperator name="Beyond"/>
<ogc:SpatialOperator name="Intersects"/>
<ogc:SpatialOperator name="Touches"/>
<ogc:SpatialOperator name="Crosses"/>
<ogc:SpatialOperator name="Within"/>
<ogc:SpatialOperator name="Contains"/>
<ogc:SpatialOperator name="Overlaps"/>
<ogc:SpatialOperator name="BBOX"/>
</ogc:SpatialOperators>
</ogc:Spatial_Capabilities>
<ogc:Scalar_Capabilities>
<ogc:LogicalOperators/>
<ogc:ComparisonOperators>
<ogc:ComparisonOperator>LessThan</ogc:ComparisonOperator>
<ogc:ComparisonOperator>GreaterThan</ogc:ComparisonOperator>
<ogc:ComparisonOperator>LessThanEqualTo</ogc:ComparisonOperator>
<ogc:ComparisonOperator>GreaterThanEqualTo</ogc:ComparisonOperator>
<ogc:ComparisonOperator>EqualTo</ogc:ComparisonOperator>
<ogc:ComparisonOperator>NotEqualTo</ogc:ComparisonOperator>
<ogc:ComparisonOperator>Like</ogc:ComparisonOperator>
<ogc:ComparisonOperator>Between</ogc:ComparisonOperator>
<ogc:ComparisonOperator>NullCheck</ogc:ComparisonOperator>
</ogc:ComparisonOperators>
<ogc:ArithmeticOperators>
<ogc:SimpleArithmetic/>
<ogc:Functions/>
</ogc:ArithmeticOperators>
</ogc:Scalar_Capabilities>
<ogc:Id_Capabilities>
<ogc:FID/>
<ogc:EID/>
</ogc:Id_Capabilities>
</ogc:Filter_Capabilities>
</WFS_Capabilities>
""")
    gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
    ds = ogr.Open('WFS:/vsimem/wfs200_endpoint_paging')
    lyr = ds.GetLayer(0)
    if lyr.GetMetadata() != {'ABSTRACT': 'abstract', 'KEYWORD_1': 'keyword', 'TITLE': 'title'}:
        gdaltest.post_reason('fail')
        print(lyr.GetMetadata())
        return 'fail'

    gdal.FileFromMemBuffer('/vsimem/wfs200_endpoint_paging?SERVICE=WFS&VERSION=2.0.0&REQUEST=DescribeFeatureType&TYPENAME=my_layer',
"""<xsd:schema xmlns:foo="http://foo" xmlns:gml="http://www.opengis.net/gml" xmlns:xsd="http://www.w3.org/2001/XMLSchema" elementFormDefault="qualified" targetNamespace="http://foo">
<xsd:import namespace="http://www.opengis.net/gml" schemaLocation="http://foo/schemas/gml/3.2.1/base/gml.xsd"/>
<xsd:complexType name="my_layerType">
<xsd:complexContent>
<xsd:extension base="gml:AbstractFeatureType">
<xsd:sequence>
<xsd:element maxOccurs="1" minOccurs="0" name="str" nillable="true" type="xsd:string"/>
<xsd:element maxOccurs="1" minOccurs="0" name="boolean" nillable="true" type="xsd:boolean"/>
<xsd:element maxOccurs="1" minOccurs="0" name="short" nillable="true" type="xsd:short"/>
<xsd:element maxOccurs="1" minOccurs="0" name="int" nillable="true" type="xsd:int"/>
<xsd:element maxOccurs="1" minOccurs="0" name="float" nillable="true" type="xsd:float"/>
<xsd:element maxOccurs="1" minOccurs="0" name="double" nillable="true" type="xsd:double"/>
<xsd:element maxOccurs="1" minOccurs="0" name="dt" nillable="true" type="xsd:dateTime"/>
<xsd:element maxOccurs="1" minOccurs="0" name="shape" nillable="true" type="gml:PointPropertyType"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:element name="my_layer" substitutionGroup="gml:_Feature" type="foo:my_layerType"/>
</xsd:schema>
""")
    gdal.FileFromMemBuffer('/vsimem/wfs200_endpoint_paging?SERVICE=WFS&VERSION=2.0.0&REQUEST=GetFeature&TYPENAMES=my_layer&STARTINDEX=0&COUNT=2',
"""<wfs:FeatureCollection xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:ogc="http://www.opengis.net/ogc"
xmlns:foo="http://foo"
xmlns:wfs="http://www.opengis.net/wfs"
xmlns:ows="http://www.opengis.net/ows"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:gml="http://www.opengis.net/gml"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
numberMatched="unknown" numberReturned="2"
timeStamp="2015-04-17T14:14:24.859Z"
xsi:schemaLocation="http://foo /vsimem/wfs_endpoint?SERVICE=WFS&VERSION=2.0.0&REQUEST=DescribeFeatureType&TYPENAME=my_layer
http://www.opengis.net/wfs/2.0 http://schemas.opengis.net/wfs/2.0/wfs.xsd">
<gml:featureMembers>
<foo:my_layer gml:id="my_layer.1">
<foo:str>str</foo:str>
<foo:boolean>true</foo:boolean>
<foo:short>1</foo:short>
<foo:int>123456789</foo:int>
<foo:float>1.2</foo:float>
<foo:double>1.23</foo:double>
<foo:dt>2015-04-17T12:34:56Z</foo:dt>
<foo:shape>
<gml:Point srsDimension="2" srsName="urn:ogc:def:crs:EPSG::4326">
<gml:pos>49 2</gml:pos>
</gml:Point>
</foo:shape>
</foo:my_layer>
</gml:featureMembers>
<gml:featureMembers>
<foo:my_layer gml:id="my_layer.2">
</foo:my_layer>
</gml:featureMembers>
</wfs:FeatureCollection>
""")
    gdal.FileFromMemBuffer('/vsimem/wfs200_endpoint_paging?SERVICE=WFS&VERSION=2.0.0&REQUEST=GetFeature&TYPENAMES=my_layer&STARTINDEX=2&COUNT=2',
"""<wfs:FeatureCollection xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:ogc="http://www.opengis.net/ogc"
xmlns:foo="http://foo"
xmlns:wfs="http://www.opengis.net/wfs"
xmlns:ows="http://www.opengis.net/ows"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:gml="http://www.opengis.net/gml"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
numberMatched="unknown" numberReturned="1"
timeStamp="2015-04-17T14:14:24.859Z"
xsi:schemaLocation="http://foo /vsimem/wfs_endpoint?SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType&TYPENAME=my_layer
http://www.opengis.net/wfs/2.0 http://schemas.opengis.net/wfs/2.0/wfs.xsd">
<gml:featureMembers>
<foo:my_layer gml:id="my_layer.3">
</foo:my_layer>
</gml:featureMembers>
</wfs:FeatureCollection>
""")
    f = lyr.GetNextFeature()
    if f is None:
        gdaltest.post_reason('fail')
        return 'fail'
    if f.gml_id != 'my_layer.1':
        gdaltest.post_reason('fail')
        f.DumpReadable()
        return 'fail'

    f = lyr.GetNextFeature()
    if f is None:
        gdaltest.post_reason('fail')
        return 'fail'
    if f.gml_id != 'my_layer.2':
        gdaltest.post_reason('fail')
        f.DumpReadable()
        return 'fail'

    f = lyr.GetNextFeature()
    if f is None:
        gdaltest.post_reason('fail')
        return 'fail'
    if f.gml_id != 'my_layer.3':
        gdaltest.post_reason('fail')
        f.DumpReadable()
        return 'fail'

    f = lyr.GetNextFeature()
    if f is not None:
        gdaltest.post_reason('fail')
        f.DumpReadable()
        return 'fail'

    #if lyr.GetFeatureCount() != 3:
    #    gdaltest.post_reason('fail')
    #    print(lyr.GetFeatureCount())
    #    return 'fail'

    return 'success'

###############################################################################
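# Test WFS 2.0 with OUTPUTFORMAT=application/json: GetFeature responses are
# GeoJSON rather than GML.
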
def ogr_wfs_vsimem_wfs200_json():
    if gdaltest.wfs_drv is None:
        return 'skip'

    gdal.FileFromMemBuffer('/vsimem/wfs200_endpoint_json?SERVICE=WFS&REQUEST=GetCapabilities',
"""<WFS_Capabilities version="2.0.0">
<OperationsMetadata>
<ows:Operation name="GetFeature">
<ows:Parameter name="resultType">
<ows:Value>results</ows:Value>
<ows:Value>hits</ows:Value>
</ows:Parameter>
<ows:Parameter name="outputFormat">
<ows:AllowedValues>
<ows:Value>application/json</ows:Value>
</ows:AllowedValues>
</ows:Parameter>
<ows:Constraint name="CountDefault">
<ows:NoValues/>
<ows:DefaultValue>2</ows:DefaultValue>
</ows:Constraint>
</ows:Operation>
<ows:Constraint name="ImplementsResultPaging">
<ows:NoValues/><ows:DefaultValue>TRUE</ows:DefaultValue>
</ows:Constraint>
</OperationsMetadata>
<FeatureTypeList>
<FeatureType>
<Name>my_layer</Name>
<DefaultSRS>urn:ogc:def:crs:EPSG::4326</DefaultSRS>
<ows:WGS84BoundingBox>
<ows:LowerCorner>-180.0 -90.0</ows:LowerCorner>
<ows:UpperCorner>180.0 90.0</ows:UpperCorner>
</ows:WGS84BoundingBox>
</FeatureType>
</FeatureTypeList>
<ogc:Filter_Capabilities>
<ogc:Spatial_Capabilities>
<ogc:GeometryOperands>
<ogc:GeometryOperand>gml:Envelope</ogc:GeometryOperand>
<ogc:GeometryOperand>gml:Point</ogc:GeometryOperand>
<ogc:GeometryOperand>gml:LineString</ogc:GeometryOperand>
<ogc:GeometryOperand>gml:Polygon</ogc:GeometryOperand>
</ogc:GeometryOperands>
<ogc:SpatialOperators>
<ogc:SpatialOperator name="Disjoint"/>
<ogc:SpatialOperator name="Equals"/>
<ogc:SpatialOperator name="DWithin"/>
<ogc:SpatialOperator name="Beyond"/>
<ogc:SpatialOperator name="Intersects"/>
<ogc:SpatialOperator name="Touches"/>
<ogc:SpatialOperator name="Crosses"/>
<ogc:SpatialOperator name="Within"/>
<ogc:SpatialOperator name="Contains"/>
<ogc:SpatialOperator name="Overlaps"/>
<ogc:SpatialOperator name="BBOX"/>
</ogc:SpatialOperators>
</ogc:Spatial_Capabilities>
<ogc:Scalar_Capabilities>
<ogc:LogicalOperators/>
<ogc:ComparisonOperators>
<ogc:ComparisonOperator>LessThan</ogc:ComparisonOperator>
<ogc:ComparisonOperator>GreaterThan</ogc:ComparisonOperator>
<ogc:ComparisonOperator>LessThanEqualTo</ogc:ComparisonOperator>
<ogc:ComparisonOperator>GreaterThanEqualTo</ogc:ComparisonOperator>
<ogc:ComparisonOperator>EqualTo</ogc:ComparisonOperator>
<ogc:ComparisonOperator>NotEqualTo</ogc:ComparisonOperator>
<ogc:ComparisonOperator>Like</ogc:ComparisonOperator>
<ogc:ComparisonOperator>Between</ogc:ComparisonOperator>
<ogc:ComparisonOperator>NullCheck</ogc:ComparisonOperator>
</ogc:ComparisonOperators>
<ogc:ArithmeticOperators>
<ogc:SimpleArithmetic/>
<ogc:Functions/>
</ogc:ArithmeticOperators>
</ogc:Scalar_Capabilities>
<ogc:Id_Capabilities>
<ogc:FID/>
<ogc:EID/>
</ogc:Id_Capabilities>
</ogc:Filter_Capabilities>
</WFS_Capabilities>
""")
    gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
    ds = ogr.Open('WFS:/vsimem/wfs200_endpoint_json?OUTPUTFORMAT=application/json')
    lyr = ds.GetLayer(0)

    gdal.FileFromMemBuffer('/vsimem/wfs200_endpoint_json?SERVICE=WFS&VERSION=2.0.0&REQUEST=DescribeFeatureType&TYPENAME=my_layer',
"""<xsd:schema xmlns:foo="http://foo" xmlns:gml="http://www.opengis.net/gml" xmlns:xsd="http://www.w3.org/2001/XMLSchema" elementFormDefault="qualified" targetNamespace="http://foo">
<xsd:import namespace="http://www.opengis.net/gml" schemaLocation="http://foo/schemas/gml/3.2.1/base/gml.xsd"/>
<xsd:complexType name="my_layerType">
<xsd:complexContent>
<xsd:extension base="gml:AbstractFeatureType">
<xsd:sequence>
<xsd:element maxOccurs="1" minOccurs="0" name="str" nillable="true" type="xsd:string"/>
<xsd:element maxOccurs="1" minOccurs="0" name="boolean" nillable="true" type="xsd:boolean"/>
<xsd:element maxOccurs="1" minOccurs="0" name="short" nillable="true" type="xsd:short"/>
<xsd:element maxOccurs="1" minOccurs="0" name="int" nillable="true" type="xsd:int"/>
<xsd:element maxOccurs="1" minOccurs="0" name="float" nillable="true" type="xsd:float"/>
<xsd:element maxOccurs="1" minOccurs="0" name="double" nillable="true" type="xsd:double"/>
<xsd:element maxOccurs="1" minOccurs="0" name="dt" nillable="true" type="xsd:dateTime"/>
<xsd:element maxOccurs="1" minOccurs="0" name="shape" nillable="true" type="gml:PointPropertyType"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:element name="my_layer" substitutionGroup="gml:_Feature" type="foo:my_layerType"/>
</xsd:schema>
""")
    gdal.FileFromMemBuffer('/vsimem/wfs200_endpoint_json?OUTPUTFORMAT=application/json&SERVICE=WFS&VERSION=2.0.0&REQUEST=GetFeature&TYPENAMES=my_layer&STARTINDEX=0&COUNT=2',
"""{"type":"FeatureCollection",
"totalFeatures":"unknown",
"features":[{"type":"Feature","id":"my_layer.1",
"geometry":{"type":"Point","coordinates":[2, 49]},
"properties":{"str":"str"}}]}
""")
    f = lyr.GetNextFeature()
    if f is None:
        gdaltest.post_reason('fail')
        return 'fail'
    # We currently invert... A bit weird. See comment in code. Probably inappropriate
    if f.str != 'str' or f.GetGeometryRef().ExportToWkt() != 'POINT (49 2)':
        gdaltest.post_reason('fail')
        f.DumpReadable()
        return 'fail'

    f = lyr.GetNextFeature()
    if f is not None:
        gdaltest.post_reason('fail')
        f.DumpReadable()
        return 'fail'

    return 'success'

###############################################################################
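# Test WFS 2.0 with OUTPUTFORMAT=multipart: the GetFeature response is a
# multipart/mixed document whose parts carry the actual payload (GeoJSON or
# CSV/CSVT in the cases below).
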
def ogr_wfs_vsimem_wfs200_multipart():
    if gdaltest.wfs_drv is None:
        return 'skip'

    gdal.FileFromMemBuffer('/vsimem/wfs200_endpoint_multipart?SERVICE=WFS&REQUEST=GetCapabilities',
"""<WFS_Capabilities version="2.0.0">
<FeatureTypeList>
<FeatureType>
<Name>my_layer</Name>
<DefaultSRS>urn:ogc:def:crs:EPSG::4326</DefaultSRS>
<ows:WGS84BoundingBox>
<ows:LowerCorner>-180.0 -90.0</ows:LowerCorner>
<ows:UpperCorner>180.0 90.0</ows:UpperCorner>
</ows:WGS84BoundingBox>
</FeatureType>
</FeatureTypeList>
</WFS_Capabilities>
""")
    gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
    ds = ogr.Open('WFS:/vsimem/wfs200_endpoint_multipart?OUTPUTFORMAT=multipart')
    lyr = ds.GetLayer(0)

    gdal.FileFromMemBuffer('/vsimem/wfs200_endpoint_multipart?SERVICE=WFS&VERSION=2.0.0&REQUEST=DescribeFeatureType&TYPENAME=my_layer',
"""<xsd:schema xmlns:foo="http://foo" xmlns:gml="http://www.opengis.net/gml" xmlns:xsd="http://www.w3.org/2001/XMLSchema" elementFormDefault="qualified" targetNamespace="http://foo">
<xsd:import namespace="http://www.opengis.net/gml" schemaLocation="http://foo/schemas/gml/3.2.1/base/gml.xsd"/>
<xsd:complexType name="my_layerType">
<xsd:complexContent>
<xsd:extension base="gml:AbstractFeatureType">
<xsd:sequence>
<xsd:element maxOccurs="1" minOccurs="0" name="str" nillable="true" type="xsd:string"/>
<xsd:element maxOccurs="1" minOccurs="0" name="shape" nillable="true" type="gml:PointPropertyType"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:element name="my_layer" substitutionGroup="gml:_Feature" type="foo:my_layerType"/>
</xsd:schema>
""")
    gdal.FileFromMemBuffer('/vsimem/wfs200_endpoint_multipart?OUTPUTFORMAT=multipart&SERVICE=WFS&VERSION=2.0.0&REQUEST=GetFeature&TYPENAMES=my_layer',
"""Content-Type: multipart/mixed; boundary="my_boundary"
\r
\r
--my_boundary
Content-Type: text/plain; charset=us-ascii
Content-Disposition: attachment; filename=my.json
\r
{
"type":"FeatureCollection",
"totalFeatures":"unknown",
"features":[
{
"type":"Feature",
"id":"my_layer.1",
"geometry":{"type":"Point","coordinates":[2, 49]},
"properties":{"str":"str"}
}
]
}
--my_boundary--
""")
    f = lyr.GetNextFeature()
    if f is None:
        gdaltest.post_reason('fail')
        return 'fail'
    # We currently invert... A bit weird. See comment in code. Probably inappropriate
    if f.str != 'str' or f.GetGeometryRef().ExportToWkt() != 'POINT (49 2)':
        gdaltest.post_reason('fail')
        f.DumpReadable()
        return 'fail'
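
    # Second case: the multipart part has no Content-Type/Content-Disposition
    # headers, so the payload type must be recognized from the content itself.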
    ds = ogr.Open('WFS:/vsimem/wfs200_endpoint_multipart?OUTPUTFORMAT=multipart')
    lyr = ds.GetLayer(0)
    gdal.FileFromMemBuffer('/vsimem/wfs200_endpoint_multipart?OUTPUTFORMAT=multipart&SERVICE=WFS&VERSION=2.0.0&REQUEST=GetFeature&TYPENAMES=my_layer',
"""Content-Type: multipart/mixed; boundary="my_boundary"
\r
\r
--my_boundary
\r
{
"type":"FeatureCollection",
"totalFeatures":"unknown",
"features":[
{
"type":"Feature",
"id":"my_layer.1",
"geometry":{"type":"Point","coordinates":[2, 49]},
"properties":{"str":"str"}
}
]
}
--my_boundary--
""")
    f = lyr.GetNextFeature()
    if f is None:
        gdaltest.post_reason('fail')
        return 'fail'

    ds = ogr.Open('WFS:/vsimem/wfs200_endpoint_multipart?OUTPUTFORMAT=multipart')
    lyr = ds.GetLayer(0)
    gdal.FileFromMemBuffer('/vsimem/wfs200_endpoint_multipart?OUTPUTFORMAT=multipart&SERVICE=WFS&VERSION=2.0.0&REQUEST=GetFeature&TYPENAMES=my_layer',
"""Content-Type: multipart/mixed; boundary="my_boundary"
\r
\r
--my_boundary
Content-Disposition: attachment; filename=my.csvt
\r
String,String
--my_boundary
Content-Disposition: attachment; filename=my.csv
\r
str,WKT
str,"POINT(2 49)"
--my_boundary--
""")
    f = lyr.GetNextFeature()
    if f is None:
        gdaltest.post_reason('fail')
        return 'fail'
    # We currently invert... A bit weird. See comment in code. Probably inappropriate
    if f.str != 'str' or f.GetGeometryRef().ExportToWkt() != 'POINT (49 2)':
        gdaltest.post_reason('fail')
        f.DumpReadable()
        return 'fail'

    return 'success'

###############################################################################
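# Test WFS 2.0 standard joins (ImplementsStandardJoins): SELECT ... JOIN
# queries are mapped to GetFeature requests with multiple TYPENAMES and a
# PropertyIsEqualTo FILTER; error cases are exercised first.
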
def ogr_wfs_vsimem_wfs200_join():
    if gdaltest.wfs_drv is None:
        return 'skip'

    gdal.FileFromMemBuffer('/vsimem/wfs200_endpoint_join?SERVICE=WFS&REQUEST=GetCapabilities',
"""<WFS_Capabilities version="2.0.0">
<OperationsMetadata>
<ows:Operation name="GetFeature">
<ows:Constraint name="CountDefault">
<ows:NoValues/>
<ows:DefaultValue>1</ows:DefaultValue>
</ows:Constraint>
</ows:Operation>
<ows:Constraint name="ImplementsResultPaging">
<ows:NoValues/><ows:DefaultValue>TRUE</ows:DefaultValue>
</ows:Constraint>
<ows:Constraint name="ImplementsStandardJoins">
<ows:NoValues/><ows:DefaultValue>TRUE</ows:DefaultValue>
</ows:Constraint>
</OperationsMetadata>
<FeatureTypeList>
<FeatureType>
<Name>lyr1</Name>
<DefaultSRS>urn:ogc:def:crs:EPSG::4326</DefaultSRS>
<ows:WGS84BoundingBox>
<ows:LowerCorner>-180.0 -90.0</ows:LowerCorner>
<ows:UpperCorner>180.0 90.0</ows:UpperCorner>
</ows:WGS84BoundingBox>
</FeatureType>
<FeatureType>
<Name>lyr2</Name>
<DefaultSRS>urn:ogc:def:crs:EPSG::4326</DefaultSRS>
<ows:WGS84BoundingBox>
<ows:LowerCorner>-180.0 -90.0</ows:LowerCorner>
<ows:UpperCorner>180.0 90.0</ows:UpperCorner>
</ows:WGS84BoundingBox>
</FeatureType>
</FeatureTypeList>
</WFS_Capabilities>
""")
    gdal.FileFromMemBuffer('/vsimem/wfs200_endpoint_join?SERVICE=WFS&VERSION=2.0.0&REQUEST=DescribeFeatureType&TYPENAME=lyr1,lyr2',
"""<xsd:schema xmlns:foo="http://foo" xmlns:gml="http://www.opengis.net/gml" xmlns:xsd="http://www.w3.org/2001/XMLSchema" elementFormDefault="qualified" targetNamespace="http://foo">
<xsd:import namespace="http://www.opengis.net/gml" schemaLocation="http://foo/schemas/gml/3.2.1/base/gml.xsd"/>
<xsd:complexType name="lyr1Type">
<xsd:complexContent>
<xsd:extension base="gml:AbstractFeatureType">
<xsd:sequence>
<xsd:element maxOccurs="1" minOccurs="0" name="str" nillable="true" type="xsd:string"/>
<xsd:element maxOccurs="1" minOccurs="0" name="shape" nillable="true" type="gml:PointPropertyType"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:element name="lyr1" substitutionGroup="gml:_Feature" type="foo:lyr1Type"/>
<xsd:complexType name="lyr2Type">
<xsd:complexContent>
<xsd:extension base="gml:AbstractFeatureType">
<xsd:sequence>
<xsd:element maxOccurs="1" minOccurs="0" name="str2" nillable="true" type="xsd:string"/>
<xsd:element maxOccurs="1" minOccurs="0" name="another_shape" nillable="true" type="gml:PointPropertyType"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:element name="lyr2" substitutionGroup="gml:_Feature" type="foo:lyr2Type"/>
</xsd:schema>
""")
    gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
    ds = ogr.Open('WFS:/vsimem/wfs200_endpoint_join')
    sql_lyr = ds.ExecuteSQL('SELECT * FROM lyr1 JOIN lyr2 ON lyr1.str = lyr2.str2')
    gdal.PushErrorHandler()
    f = sql_lyr.GetNextFeature()
    gdal.PopErrorHandler()
    if f is not None:
        gdaltest.post_reason('fail')
        return 'fail'
    ds.ReleaseResultSet(sql_lyr)

    ds = ogr.Open('WFS:/vsimem/wfs200_endpoint_join')
    sql_lyr = ds.ExecuteSQL('SELECT * FROM lyr1 JOIN lyr2 ON lyr1.str = lyr2.str2')
    gdal.FileFromMemBuffer('/vsimem/wfs200_endpoint_join?SERVICE=WFS&VERSION=2.0.0&REQUEST=GetFeature&TYPENAMES=%28lyr1,lyr2%29&STARTINDEX=0&COUNT=1&FILTER=%3CFilter%20xmlns%3D%22http:%2F%2Fwww.opengis.net%2Ffes%2F2.0%22%20xmlns:gml%3D%22http:%2F%2Fwww.opengis.net%2Fgml%2F3.2%22%3E%3CPropertyIsEqualTo%3E%3CValueReference%3Elyr1%2Fstr%3C%2FValueReference%3E%3CValueReference%3Elyr2%2Fstr2%3C%2FValueReference%3E%3C%2FPropertyIsEqualTo%3E%3C%2FFilter%3E',
"""""")
    gdal.PushErrorHandler()
    f = sql_lyr.GetNextFeature()
    gdal.PopErrorHandler()
    if f is not None or gdal.GetLastErrorMsg().find('Empty content returned by server') < 0:
        gdaltest.post_reason('fail')
        print(gdal.GetLastErrorMsg())
        return 'fail'
    ds.ReleaseResultSet(sql_lyr)

    ds = ogr.Open('WFS:/vsimem/wfs200_endpoint_join')
    sql_lyr = ds.ExecuteSQL('SELECT * FROM lyr1 JOIN lyr2 ON lyr1.str = lyr2.str2')
    gdal.FileFromMemBuffer('/vsimem/wfs200_endpoint_join?SERVICE=WFS&VERSION=2.0.0&REQUEST=GetFeature&TYPENAMES=%28lyr1,lyr2%29&STARTINDEX=0&COUNT=1&FILTER=%3CFilter%20xmlns%3D%22http:%2F%2Fwww.opengis.net%2Ffes%2F2.0%22%20xmlns:gml%3D%22http:%2F%2Fwww.opengis.net%2Fgml%2F3.2%22%3E%3CPropertyIsEqualTo%3E%3CValueReference%3Elyr1%2Fstr%3C%2FValueReference%3E%3CValueReference%3Elyr2%2Fstr2%3C%2FValueReference%3E%3C%2FPropertyIsEqualTo%3E%3C%2FFilter%3E',
"""<ServiceExceptionReport/>""")
    gdal.PushErrorHandler()
    f = sql_lyr.GetNextFeature()
    gdal.PopErrorHandler()
    if f is not None or gdal.GetLastErrorMsg().find('Error returned by server') < 0:
        gdaltest.post_reason('fail')
        print(gdal.GetLastErrorMsg())
        return 'fail'
    ds.ReleaseResultSet(sql_lyr)

    ds = ogr.Open('WFS:/vsimem/wfs200_endpoint_join')
    sql_lyr = ds.ExecuteSQL('SELECT * FROM lyr1 JOIN lyr2 ON lyr1.str = lyr2.str2')
    gdal.FileFromMemBuffer('/vsimem/wfs200_endpoint_join?SERVICE=WFS&VERSION=2.0.0&REQUEST=GetFeature&TYPENAMES=%28lyr1,lyr2%29&STARTINDEX=0&COUNT=1&FILTER=%3CFilter%20xmlns%3D%22http:%2F%2Fwww.opengis.net%2Ffes%2F2.0%22%20xmlns:gml%3D%22http:%2F%2Fwww.opengis.net%2Fgml%2F3.2%22%3E%3CPropertyIsEqualTo%3E%3CValueReference%3Elyr1%2Fstr%3C%2FValueReference%3E%3CValueReference%3Elyr2%2Fstr2%3C%2FValueReference%3E%3C%2FPropertyIsEqualTo%3E%3C%2FFilter%3E',
"""<invalid_xml""")
    gdal.PushErrorHandler()
    f = sql_lyr.GetNextFeature()
    gdal.PopErrorHandler()
    if f is not None or gdal.GetLastErrorMsg().find('Error: cannot parse') < 0:
        gdaltest.post_reason('fail')
        print(gdal.GetLastErrorMsg())
        return 'fail'
    ds.ReleaseResultSet(sql_lyr)

    ds = ogr.Open('WFS:/vsimem/wfs200_endpoint_join')
    sql_lyr = ds.ExecuteSQL('SELECT * FROM lyr1 JOIN lyr2 ON lyr1.str = lyr2.str2')
    gdal.FileFromMemBuffer('/vsimem/wfs200_endpoint_join?SERVICE=WFS&VERSION=2.0.0&REQUEST=GetFeature&TYPENAMES=%28lyr1,lyr2%29&STARTINDEX=0&COUNT=1&FILTER=%3CFilter%20xmlns%3D%22http:%2F%2Fwww.opengis.net%2Ffes%2F2.0%22%20xmlns:gml%3D%22http:%2F%2Fwww.opengis.net%2Fgml%2F3.2%22%3E%3CPropertyIsEqualTo%3E%3CValueReference%3Elyr1%2Fstr%3C%2FValueReference%3E%3CValueReference%3Elyr2%2Fstr2%3C%2FValueReference%3E%3C%2FPropertyIsEqualTo%3E%3C%2FFilter%3E',
"""<dummy_xml/>""")
    gdal.PushErrorHandler()
    f = sql_lyr.GetNextFeature()
    gdal.PopErrorHandler()
    if f is not None or gdal.GetLastErrorMsg().find('Error: cannot parse') < 0:
        gdaltest.post_reason('fail')
        print(gdal.GetLastErrorMsg())
        return 'fail'
    ds.ReleaseResultSet(sql_lyr)

    ds = ogr.Open('WFS:/vsimem/wfs200_endpoint_join')
    sql_lyr = ds.ExecuteSQL('SELECT * FROM lyr1 JOIN lyr2 ON lyr1.str = lyr2.str2')
    gdal.FileFromMemBuffer('/vsimem/wfs200_endpoint_join?SERVICE=WFS&VERSION=2.0.0&REQUEST=GetFeature&TYPENAMES=%28lyr1,lyr2%29&STARTINDEX=0&COUNT=1&FILTER=%3CFilter%20xmlns%3D%22http:%2F%2Fwww.opengis.net%2Ffes%2F2.0%22%20xmlns:gml%3D%22http:%2F%2Fwww.opengis.net%2Fgml%2F3.2%22%3E%3CPropertyIsEqualTo%3E%3CValueReference%3Elyr1%2Fstr%3C%2FValueReference%3E%3CValueReference%3Elyr2%2Fstr2%3C%2FValueReference%3E%3C%2FPropertyIsEqualTo%3E%3C%2FFilter%3E',
"""<?xml version="1.0" encoding="UTF-8"?>
<wfs:FeatureCollection xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:foo="http://foo"
xmlns:wfs="http://www.opengis.net/wfs/2.0"
xmlns:gml="http://www.opengis.net/gml/3.2"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
numberMatched="unknown" numberReturned="1" timeStamp="2015-01-01T00:00:00.000Z"
xsi:schemaLocation="http://www.opengis.net/gml/3.2 http://schemas.opengis.net/gml/3.2.1/gml.xsd
http://www.opengis.net/wfs/2.0 http://schemas.opengis.net/wfs/2.0/wfs.xsd
http://foo /vsimem/wfs200_endpoint_join?SERVICE=WFS&VERSION=2.0.0&REQUEST=DescribeFeatureType&TYPENAME=lyr1,lyr2">
<wfs:member>
<wfs:Tuple>
<wfs:member>
<foo:lyr1 gml:id="lyr1-100">
<foo:str>123.4</foo:str>
<foo:shape><gml:Point srsName="urn:ogc:def:crs:EPSG::4326" gml:id="bla"><gml:pos>48.5 2.5</gml:pos></gml:Point></foo:shape>
</foo:lyr1>
</wfs:member>
<wfs:member>
<foo:lyr2 gml:id="lyr2-101">
<foo:str2>123.4</foo:str2>
<foo:another_shape><gml:Point srsName="urn:ogc:def:crs:EPSG::4326" gml:id="bla"><gml:pos>49 2</gml:pos></gml:Point></foo:another_shape>
</foo:lyr2>
</wfs:member>
</wfs:Tuple>
</wfs:member>
</wfs:FeatureCollection>
""")
    gdal.FileFromMemBuffer('/vsimem/wfs200_endpoint_join?SERVICE=WFS&VERSION=2.0.0&REQUEST=GetFeature&TYPENAMES=%28lyr1,lyr2%29&STARTINDEX=1&COUNT=1&FILTER=%3CFilter%20xmlns%3D%22http:%2F%2Fwww.opengis.net%2Ffes%2F2.0%22%20xmlns:gml%3D%22http:%2F%2Fwww.opengis.net%2Fgml%2F3.2%22%3E%3CPropertyIsEqualTo%3E%3CValueReference%3Elyr1%2Fstr%3C%2FValueReference%3E%3CValueReference%3Elyr2%2Fstr2%3C%2FValueReference%3E%3C%2FPropertyIsEqualTo%3E%3C%2FFilter%3E',
"""<?xml version="1.0" encoding="UTF-8"?>
<wfs:FeatureCollection xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:foo="http://foo"
xmlns:wfs="http://www.opengis.net/wfs/2.0"
xmlns:gml="http://www.opengis.net/gml/3.2"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
numberMatched="unknown" numberReturned="1" timeStamp="2015-01-01T00:00:00.000Z"
xsi:schemaLocation="http://www.opengis.net/gml/3.2 http://schemas.opengis.net/gml/3.2.1/gml.xsd
http://www.opengis.net/wfs/2.0 http://schemas.opengis.net/wfs/2.0/wfs.xsd
http://foo /vsimem/wfs200_endpoint_join?SERVICE=WFS&VERSION=2.0.0&REQUEST=DescribeFeatureType&TYPENAME=lyr1,lyr2">
<wfs:member>
<wfs:Tuple>
<wfs:member>
<foo:lyr1 gml:id="lyr1-101">
<foo:str>foo</foo:str>
<foo:shape><gml:Point srsName="urn:ogc:def:crs:EPSG::4326" gml:id="bla"><gml:pos>48.5 2.5</gml:pos></gml:Point></foo:shape>
</foo:lyr1>
</wfs:member>
<wfs:member>
<foo:lyr2 gml:id="lyr2-102">
<foo:str2>foo</foo:str2>
<foo:another_shape><gml:Point srsName="urn:ogc:def:crs:EPSG::4326" gml:id="bla"><gml:pos>49 2</gml:pos></gml:Point></foo:another_shape>
</foo:lyr2>
</wfs:member>
</wfs:Tuple>
</wfs:member>
</wfs:FeatureCollection>
""")
    gdal.FileFromMemBuffer('/vsimem/wfs200_endpoint_join?SERVICE=WFS&VERSION=2.0.0&REQUEST=GetFeature&TYPENAMES=%28lyr1,lyr2%29&STARTINDEX=2&COUNT=1&FILTER=%3CFilter%20xmlns%3D%22http:%2F%2Fwww.opengis.net%2Ffes%2F2.0%22%20xmlns:gml%3D%22http:%2F%2Fwww.opengis.net%2Fgml%2F3.2%22%3E%3CPropertyIsEqualTo%3E%3CValueReference%3Elyr1%2Fstr%3C%2FValueReference%3E%3CValueReference%3Elyr2%2Fstr2%3C%2FValueReference%3E%3C%2FPropertyIsEqualTo%3E%3C%2FFilter%3E',
"""<?xml version="1.0" encoding="UTF-8"?>
<wfs:FeatureCollection xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:foo="http://foo"
xmlns:wfs="http://www.opengis.net/wfs/2.0"
xmlns:gml="http://www.opengis.net/gml/3.2"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
numberMatched="unknown" numberReturned="0" timeStamp="2015-01-01T00:00:00.000Z"
xsi:schemaLocation="http://www.opengis.net/gml/3.2 http://schemas.opengis.net/gml/3.2.1/gml.xsd
http://www.opengis.net/wfs/2.0 http://schemas.opengis.net/wfs/2.0/wfs.xsd
http://foo /vsimem/wfs200_endpoint_join?SERVICE=WFS&VERSION=2.0.0&REQUEST=DescribeFeatureType&TYPENAME=lyr1,lyr2">
</wfs:FeatureCollection>
""")
    f = sql_lyr.GetNextFeature()
    if f['lyr1.gml_id'] != 'lyr1-100' or f['lyr1.str'] != '123.4' or \
       f['lyr2.gml_id'] != 'lyr2-101' or f['lyr2.str2'] != '123.4' or \
       f['lyr1.shape'].ExportToWkt() != 'POINT (2.5 48.5)' or \
       f['lyr2.another_shape'].ExportToWkt() != 'POINT (2 49)':
        gdaltest.post_reason('fail')
        f.DumpReadable()
        return 'fail'

    f = sql_lyr.GetNextFeature()
    if f['lyr1.gml_id'] != 'lyr1-101' or f['lyr1.str'] != 'foo' or \
       f['lyr2.gml_id'] != 'lyr2-102' or f['lyr2.str2'] != 'foo' or \
       f['lyr1.shape'].ExportToWkt() != 'POINT (2.5 48.5)' or \
       f['lyr2.another_shape'].ExportToWkt() != 'POINT (2 49)':
        gdaltest.post_reason('fail')
        f.DumpReadable()
        return 'fail'

    f = sql_lyr.GetNextFeature()
    if f is not None:
        gdaltest.post_reason('fail')
        f.DumpReadable()
        return 'fail'

    sql_lyr.ResetReading()
    sql_lyr.ResetReading()
    f = sql_lyr.GetNextFeature()
    if f['lyr1.gml_id'] != 'lyr1-100':
        gdaltest.post_reason('fail')
        f.DumpReadable()
        return 'fail'

    gdal.PushErrorHandler('CPLQuietErrorHandler')
    fc = sql_lyr.GetFeatureCount()
    gdal.PopErrorHandler()
    if fc != 2:
        gdaltest.post_reason('fail')
        print(gdal.GetLastErrorMsg())
        print(fc)
        return 'fail'

    # Empty content returned by server
    gdal.FileFromMemBuffer('/vsimem/wfs200_endpoint_join?SERVICE=WFS&VERSION=2.0.0&REQUEST=GetFeature&TYPENAMES=%28lyr1,lyr2%29&FILTER=%3CFilter%20xmlns%3D%22http:%2F%2Fwww.opengis.net%2Ffes%2F2.0%22%20xmlns:gml%3D%22http:%2F%2Fwww.opengis.net%2Fgml%2F3.2%22%3E%3CPropertyIsEqualTo%3E%3CValueReference%3Elyr1%2Fstr%3C%2FValueReference%3E%3CValueReference%3Elyr2%2Fstr2%3C%2FValueReference%3E%3C%2FPropertyIsEqualTo%3E%3C%2FFilter%3E&RESULTTYPE=hits',
"""""")
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    fc = sql_lyr.GetFeatureCount()
    gdal.PopErrorHandler()
    if fc != 2:
        gdaltest.post_reason('fail')
        print(gdal.GetLastErrorMsg())
        return 'fail'

    # Invalid XML
    gdal.FileFromMemBuffer('/vsimem/wfs200_endpoint_join?SERVICE=WFS&VERSION=2.0.0&REQUEST=GetFeature&TYPENAMES=%28lyr1,lyr2%29&FILTER=%3CFilter%20xmlns%3D%22http:%2F%2Fwww.opengis.net%2Ffes%2F2.0%22%20xmlns:gml%3D%22http:%2F%2Fwww.opengis.net%2Fgml%2F3.2%22%3E%3CPropertyIsEqualTo%3E%3CValueReference%3Elyr1%2Fstr%3C%2FValueReference%3E%3CValueReference%3Elyr2%2Fstr2%3C%2FValueReference%3E%3C%2FPropertyIsEqualTo%3E%3C%2FFilter%3E&RESULTTYPE=hits',
"""<invalid_xml""")
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    fc = sql_lyr.GetFeatureCount()
    gdal.PopErrorHandler()
    if fc != 2:
        gdaltest.post_reason('fail')
        print(gdal.GetLastErrorMsg())
        return 'fail'

    # Server exception
    gdal.FileFromMemBuffer('/vsimem/wfs200_endpoint_join?SERVICE=WFS&VERSION=2.0.0&REQUEST=GetFeature&TYPENAMES=%28lyr1,lyr2%29&FILTER=%3CFilter%20xmlns%3D%22http:%2F%2Fwww.opengis.net%2Ffes%2F2.0%22%20xmlns:gml%3D%22http:%2F%2Fwww.opengis.net%2Fgml%2F3.2%22%3E%3CPropertyIsEqualTo%3E%3CValueReference%3Elyr1%2Fstr%3C%2FValueReference%3E%3CValueReference%3Elyr2%2Fstr2%3C%2FValueReference%3E%3C%2FPropertyIsEqualTo%3E%3C%2FFilter%3E&RESULTTYPE=hits',
"""<ServiceExceptionReport/>""")
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    fc = sql_lyr.GetFeatureCount()
    gdal.PopErrorHandler()
    if fc != 2:
        gdaltest.post_reason('fail')
        print(gdal.GetLastErrorMsg())
        return 'fail'

    # Missing FeatureCollection
    gdal.FileFromMemBuffer('/vsimem/wfs200_endpoint_join?SERVICE=WFS&VERSION=2.0.0&REQUEST=GetFeature&TYPENAMES=%28lyr1,lyr2%29&FILTER=%3CFilter%20xmlns%3D%22http:%2F%2Fwww.opengis.net%2Ffes%2F2.0%22%20xmlns:gml%3D%22http:%2F%2Fwww.opengis.net%2Fgml%2F3.2%22%3E%3CPropertyIsEqualTo%3E%3CValueReference%3Elyr1%2Fstr%3C%2FValueReference%3E%3CValueReference%3Elyr2%2Fstr2%3C%2FValueReference%3E%3C%2FPropertyIsEqualTo%3E%3C%2FFilter%3E&RESULTTYPE=hits',
"""<dummy_xml/>""")
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    fc = sql_lyr.GetFeatureCount()
    gdal.PopErrorHandler()
    if fc != 2:
        gdaltest.post_reason('fail')
        print(gdal.GetLastErrorMsg())
        return 'fail'

    # Missing FeatureCollection.numberMatched
    gdal.FileFromMemBuffer('/vsimem/wfs200_endpoint_join?SERVICE=WFS&VERSION=2.0.0&REQUEST=GetFeature&TYPENAMES=%28lyr1,lyr2%29&FILTER=%3CFilter%20xmlns%3D%22http:%2F%2Fwww.opengis.net%2Ffes%2F2.0%22%20xmlns:gml%3D%22http:%2F%2Fwww.opengis.net%2Fgml%2F3.2%22%3E%3CPropertyIsEqualTo%3E%3CValueReference%3Elyr1%2Fstr%3C%2FValueReference%3E%3CValueReference%3Elyr2%2Fstr2%3C%2FValueReference%3E%3C%2FPropertyIsEqualTo%3E%3C%2FFilter%3E&RESULTTYPE=hits',
"""<FeatureCollection/>""")
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    fc = sql_lyr.GetFeatureCount()
    gdal.PopErrorHandler()
    if fc != 2:
        gdaltest.post_reason('fail')
        print(gdal.GetLastErrorMsg())
        return 'fail'

    # Valid
    gdal.FileFromMemBuffer('/vsimem/wfs200_endpoint_join?SERVICE=WFS&VERSION=2.0.0&REQUEST=GetFeature&TYPENAMES=%28lyr1,lyr2%29&FILTER=%3CFilter%20xmlns%3D%22http:%2F%2Fwww.opengis.net%2Ffes%2F2.0%22%20xmlns:gml%3D%22http:%2F%2Fwww.opengis.net%2Fgml%2F3.2%22%3E%3CPropertyIsEqualTo%3E%3CValueReference%3Elyr1%2Fstr%3C%2FValueReference%3E%3CValueReference%3Elyr2%2Fstr2%3C%2FValueReference%3E%3C%2FPropertyIsEqualTo%3E%3C%2FFilter%3E&RESULTTYPE=hits',
"""<wfs:FeatureCollection xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:ogc="http://www.opengis.net/ogc"
xmlns:foo="http://foo"
xmlns:wfs="http://www.opengis.net/wfs/2.0"
xmlns:ows="http://www.opengis.net/ows"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:gml="http://www.opengis.net/gml"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
numberMatched="3"
timeStamp="2015-04-17T14:14:24.859Z"
xsi:schemaLocation="http://foo blabla
http://www.opengis.net/wfs/2.0 http://schemas.opengis.net/wfs/2.0/wfs.xsd">
</wfs:FeatureCollection>""")
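    # Now that a valid hits response is available, GetFeatureCount() should
    # report numberMatched=3 from the server.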
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    fc = sql_lyr.GetFeatureCount()
    gdal.PopErrorHandler()
    if fc != 3:
        gdaltest.post_reason('fail')
        print(gdal.GetLastErrorMsg())
        return 'fail'

    sql_lyr.TestCapability('foo')
    sql_lyr.GetLayerDefn()

    # Test filters (not supported)
    sql_lyr.SetAttributeFilter(None)
    gdal.PushErrorHandler()
    sql_lyr.SetAttributeFilter('"lyr1.gml_id" IS NOT NULL')
    gdal.PopErrorHandler()
    sql_lyr.SetSpatialFilter(None)
    gdal.PushErrorHandler()
    sql_lyr.SetSpatialFilterRect(0, 0, 0, 0)
    gdal.PopErrorHandler()

    ds.ReleaseResultSet(sql_lyr)

    ds = ogr.Open('WFS:/vsimem/wfs200_endpoint_join')
    sql_lyr = ds.ExecuteSQL('SELECT lyr1.*, lyr2.* FROM lyr1 JOIN lyr2 ON lyr1.str = lyr2.str2')
    f = sql_lyr.GetNextFeature()
    if f['lyr1.gml_id'] != 'lyr1-100' or f['lyr1.str'] != '123.4' or \
       f['lyr2.gml_id'] != 'lyr2-101' or f['lyr2.str2'] != '123.4' or \
       f['lyr1.shape'].ExportToWkt() != 'POINT (2.5 48.5)' or \
       f['lyr2.another_shape'].ExportToWkt() != 'POINT (2 49)':
        gdaltest.post_reason('fail')
        f.DumpReadable()
        return 'fail'
    ds.ReleaseResultSet(sql_lyr)

    ds = ogr.Open('WFS:/vsimem/wfs200_endpoint_join')
    sql_lyr = ds.ExecuteSQL('SELECT * FROM lyr1 my_alias1 JOIN lyr2 ON my_alias1.str = lyr2.str2')
    f = sql_lyr.GetNextFeature()
    if f['my_alias1.gml_id'] != 'lyr1-100' or f['my_alias1.str'] != '123.4' or \
       f['lyr2.gml_id'] != 'lyr2-101' or f['lyr2.str2'] != '123.4' or \
       f['my_alias1.shape'].ExportToWkt() != 'POINT (2.5 48.5)' or \
       f['lyr2.another_shape'].ExportToWkt() != 'POINT (2 49)':
        gdaltest.post_reason('fail')
        f.DumpReadable()
        return 'fail'
    ds.ReleaseResultSet(sql_lyr)

    ds = ogr.Open('WFS:/vsimem/wfs200_endpoint_join')
    sql_lyr = ds.ExecuteSQL('SELECT my_alias1.gml_id as gml_id1, ' +
                            'CAST(my_alias1.str AS integer) AS str_int, ' +
                            'CAST(my_alias1.str AS bigint) AS str_bigint, ' +
                            'CAST(my_alias1.str AS float) AS str_float, ' +
                            'my_alias1.shape AS myshape ' +
                            'FROM lyr1 my_alias1 JOIN lyr2 ON my_alias1.str = lyr2.str2')
    f = sql_lyr.GetNextFeature()
    if f['gml_id1'] != 'lyr1-100' or \
       f['str_int'] != 123 or \
       f['str_bigint'] != 123 or \
       f['str_float'] != 123.4 or \
       f['myshape'].ExportToWkt() != 'POINT (2.5 48.5)':
        gdaltest.post_reason('fail')
        f.DumpReadable()
        return 'fail'
    ds.ReleaseResultSet(sql_lyr)

    ds = ogr.Open('WFS:/vsimem/wfs200_endpoint_join')
    sql_lyr = ds.ExecuteSQL("SELECT * FROM lyr1 JOIN lyr2 ON lyr1.str = lyr2.str2 WHERE lyr2.str2 = '123.4'")

    content = """<?xml version="1.0" encoding="UTF-8"?>
<wfs:FeatureCollection xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:foo="http://foo"
xmlns:wfs="http://www.opengis.net/wfs/2.0"
xmlns:gml="http://www.opengis.net/gml/3.2"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
numberMatched="unknown" numberReturned="1" timeStamp="2015-01-01T00:00:00.000Z"
xsi:schemaLocation="http://www.opengis.net/gml/3.2 http://schemas.opengis.net/gml/3.2.1/gml.xsd
http://www.opengis.net/wfs/2.0 http://schemas.opengis.net/wfs/2.0/wfs.xsd
http://foo /vsimem/wfs200_endpoint_join?SERVICE=WFS&VERSION=2.0.0&REQUEST=DescribeFeatureType&TYPENAME=lyr1,lyr2">
<wfs:member>
<wfs:Tuple>
<wfs:member>
<foo:lyr1 gml:id="lyr1-100">
<foo:str>123.4</foo:str>
<foo:shape><gml:Point srsName="urn:ogc:def:crs:EPSG::4326" gml:id="bla"><gml:pos>48.5 2.5</gml:pos></gml:Point></foo:shape>
</foo:lyr1>
</wfs:member>
<wfs:member>
<foo:lyr2 gml:id="lyr2-101">
<foo:str2>123.4</foo:str2>
<foo:another_shape><gml:Point srsName="urn:ogc:def:crs:EPSG::4326" gml:id="bla"><gml:pos>49 2</gml:pos></gml:Point></foo:another_shape>
</foo:lyr2>
</wfs:member>
</wfs:Tuple>
</wfs:member>
</wfs:FeatureCollection>
"""
    gdal.FileFromMemBuffer('/vsimem/wfs200_endpoint_join?SERVICE=WFS&VERSION=2.0.0&REQUEST=GetFeature&TYPENAMES=%28lyr1,lyr2%29&STARTINDEX=0&COUNT=1&FILTER=%3CFilter%20xmlns%3D%22http:%2F%2Fwww.opengis.net%2Ffes%2F2.0%22%20xmlns:gml%3D%22http:%2F%2Fwww.opengis.net%2Fgml%2F3.2%22%3E%3CAnd%3E%3CPropertyIsEqualTo%3E%3CValueReference%3Elyr1%2Fstr%3C%2FValueReference%3E%3CValueReference%3Elyr2%2Fstr2%3C%2FValueReference%3E%3C%2FPropertyIsEqualTo%3E%3CPropertyIsEqualTo%3E%3CValueReference%3Elyr2%2Fstr2%3C%2FValueReference%3E%3CLiteral%3E123.4%3C%2FLiteral%3E%3C%2FPropertyIsEqualTo%3E%3C%2FAnd%3E%3C%2FFilter%3E',
                           content)
    f = sql_lyr.GetNextFeature()
    if f['lyr1.gml_id'] != 'lyr1-100' or f['lyr1.str'] != '123.4' or \
       f['lyr2.gml_id'] != 'lyr2-101' or f['lyr2.str2'] != '123.4' or \
       f['lyr1.shape'].ExportToWkt() != 'POINT (2.5 48.5)' or \
       f['lyr2.another_shape'].ExportToWkt() != 'POINT (2 49)':
        gdaltest.post_reason('fail')
        f.DumpReadable()
        return 'fail'
    ds.ReleaseResultSet(sql_lyr)

    gdal.FileFromMemBuffer('/vsimem/wfs200_endpoint_join?SERVICE=WFS&VERSION=2.0.0&REQUEST=GetFeature&TYPENAMES=%28lyr1,lyr2%29&STARTINDEX=0&COUNT=1&FILTER=%3CFilter%20xmlns%3D%22http:%2F%2Fwww.opengis.net%2Ffes%2F2.0%22%20xmlns:gml%3D%22http:%2F%2Fwww.opengis.net%2Fgml%2F3.2%22%3E%3CAnd%3E%3CPropertyIsEqualTo%3E%3CValueReference%3Elyr1%2Fstr%3C%2FValueReference%3E%3CValueReference%3Elyr2%2Fstr2%3C%2FValueReference%3E%3C%2FPropertyIsEqualTo%3E%3CWithin%3E%3CValueReference%3Elyr2%2Fanother_shape%3C%2FValueReference%3E%3Cgml:Envelope%20srsName%3D%22urn:ogc:def:crs:EPSG::4326%22%3E%3Cgml:lowerCorner%3E%2D90%20%2D180%3C%2Fgml:lowerCorner%3E%3Cgml:upperCorner%3E90%20180%3C%2Fgml:upperCorner%3E%3C%2Fgml:Envelope%3E%3C%2FWithin%3E%3C%2FAnd%3E%3C%2FFilter%3E',
                           content)
    sql_lyr = ds.ExecuteSQL("SELECT * FROM lyr1 JOIN lyr2 ON lyr1.str = lyr2.str2 WHERE ST_Within(lyr2.another_shape, ST_MakeEnvelope(-180,-90,180,90))")
    f = sql_lyr.GetNextFeature()
    if f['lyr1.gml_id'] != 'lyr1-100':
        gdaltest.post_reason('fail')
        f.DumpReadable()
        return 'fail'
    ds.ReleaseResultSet(sql_lyr)

    gdal.FileFromMemBuffer('/vsimem/wfs200_endpoint_join?SERVICE=WFS&VERSION=2.0.0&REQUEST=GetFeature&TYPENAMES=%28lyr1,lyr2%29&STARTINDEX=0&COUNT=1&FILTER=%3CFilter%20xmlns%3D%22http:%2F%2Fwww.opengis.net%2Ffes%2F2.0%22%20xmlns:gml%3D%22http:%2F%2Fwww.opengis.net%2Fgml%2F3.2%22%3E%3CPropertyIsEqualTo%3E%3CValueReference%3Elyr1%2Fstr%3C%2FValueReference%3E%3CValueReference%3Elyr2%2Fstr2%3C%2FValueReference%3E%3C%2FPropertyIsEqualTo%3E%3C%2FFilter%3E&SORTBY=str%20DESC',
                           content)
    sql_lyr = ds.ExecuteSQL("SELECT * FROM lyr1 JOIN lyr2 ON lyr1.str = lyr2.str2 ORDER BY lyr1.str DESC")
    f = sql_lyr.GetNextFeature()
    if f['lyr1.gml_id'] != 'lyr1-100':
        gdaltest.post_reason('fail')
        f.DumpReadable()
        return 'fail'
    ds.ReleaseResultSet(sql_lyr)

    gdal.PushErrorHandler()
    sql_lyr = ds.ExecuteSQL("SELECT * FROM lyr1 JOIN lyr2 ON lyr1.str = lyr2.str2 WHERE lyr1.OGR_GEOMETRY IS NOT NULL")
    gdal.PopErrorHandler()
    if sql_lyr is not None or gdal.GetLastErrorMsg().find('Unsupported WHERE clause') < 0:
        gdaltest.post_reason('fail')
        print(gdal.GetLastErrorMsg())
        return 'fail'

    gdal.PushErrorHandler()
    sql_lyr = ds.ExecuteSQL("SELECT * FROM lyr1 JOIN lyr2 ON lyr1.OGR_GEOMETRY IS NOT NULL")
    gdal.PopErrorHandler()
    if sql_lyr is not None or gdal.GetLastErrorMsg().find('Unsupported JOIN clause') < 0:
        gdaltest.post_reason('fail')
        print(gdal.GetLastErrorMsg())
        return 'fail'

    gdal.PushErrorHandler()
    sql_lyr = ds.ExecuteSQL("SELECT 1 FROM lyr1 JOIN lyr2 ON lyr1.str = lyr2.str2")
    gdal.PopErrorHandler()
    if sql_lyr is not None or gdal.GetLastErrorMsg().find('Only column names supported in column selection') < 0:
        gdaltest.post_reason('fail')
        print(gdal.GetLastErrorMsg())
        return 'fail'

    ds = None

    return 'success'

###############################################################################
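# Test WFS 2.0 joins when the advertised layer names carry a namespace prefix
# (foo:lyr1, foo:lyr2): the prefix must be propagated to TYPENAMES and to the
# ValueReference paths of the FILTER.
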
def ogr_wfs_vsimem_wfs200_join_layer_with_namespace_prefix():
    if gdaltest.wfs_drv is None:
        return 'skip'

    gdal.FileFromMemBuffer('/vsimem/wfs200_endpoint_join?SERVICE=WFS&REQUEST=GetCapabilities',
"""<WFS_Capabilities version="2.0.0">
<OperationsMetadata>
<ows:Operation name="GetFeature">
<ows:Constraint name="CountDefault">
<ows:NoValues/>
<ows:DefaultValue>1</ows:DefaultValue>
</ows:Constraint>
</ows:Operation>
<ows:Constraint name="ImplementsResultPaging">
<ows:NoValues/><ows:DefaultValue>TRUE</ows:DefaultValue>
</ows:Constraint>
<ows:Constraint name="ImplementsStandardJoins">
<ows:NoValues/><ows:DefaultValue>TRUE</ows:DefaultValue>
</ows:Constraint>
</OperationsMetadata>
<FeatureTypeList>
<FeatureType xmlns:foo="http://foo">
<Name>foo:lyr1</Name>
<DefaultSRS>urn:ogc:def:crs:EPSG::4326</DefaultSRS>
<ows:WGS84BoundingBox>
<ows:LowerCorner>-180.0 -90.0</ows:LowerCorner>
<ows:UpperCorner>180.0 90.0</ows:UpperCorner>
</ows:WGS84BoundingBox>
</FeatureType>
<FeatureType xmlns:foo="http://foo">
<Name>foo:lyr2</Name>
<DefaultSRS>urn:ogc:def:crs:EPSG::4326</DefaultSRS>
<ows:WGS84BoundingBox>
<ows:LowerCorner>-180.0 -90.0</ows:LowerCorner>
<ows:UpperCorner>180.0 90.0</ows:UpperCorner>
</ows:WGS84BoundingBox>
</FeatureType>
</FeatureTypeList>
</WFS_Capabilities>
""")
    gdal.FileFromMemBuffer('/vsimem/wfs200_endpoint_join?SERVICE=WFS&VERSION=2.0.0&REQUEST=DescribeFeatureType&TYPENAME=foo:lyr1,foo:lyr2',
"""<xsd:schema xmlns:foo="http://foo" xmlns:gml="http://www.opengis.net/gml" xmlns:xsd="http://www.w3.org/2001/XMLSchema" elementFormDefault="qualified" targetNamespace="http://foo">
<xsd:import namespace="http://www.opengis.net/gml" schemaLocation="http://foo/schemas/gml/3.2.1/base/gml.xsd"/>
<xsd:complexType name="lyr1Type">
<xsd:complexContent>
<xsd:extension base="gml:AbstractFeatureType">
<xsd:sequence>
<xsd:element maxOccurs="1" minOccurs="0" name="str" nillable="true" type="xsd:string"/>
<xsd:element maxOccurs="1" minOccurs="0" name="shape" nillable="true" type="gml:PointPropertyType"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:element name="lyr1" substitutionGroup="gml:_Feature" type="foo:lyr1Type"/>
<xsd:complexType name="lyr2Type">
<xsd:complexContent>
<xsd:extension base="gml:AbstractFeatureType">
<xsd:sequence>
<xsd:element maxOccurs="1" minOccurs="0" name="str2" nillable="true" type="xsd:string"/>
<xsd:element maxOccurs="1" minOccurs="0" name="another_shape" nillable="true" type="gml:PointPropertyType"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:element name="lyr2" substitutionGroup="gml:_Feature" type="foo:lyr2Type"/>
</xsd:schema>
""")
    gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
    ds = ogr.Open('WFS:/vsimem/wfs200_endpoint_join')
    sql_lyr = ds.ExecuteSQL('SELECT * FROM lyr1 JOIN lyr2 ON lyr1.str = lyr2.str2')
    gdal.FileFromMemBuffer('/vsimem/wfs200_endpoint_join?SERVICE=WFS&VERSION=2.0.0&REQUEST=GetFeature&TYPENAMES=%28foo:lyr1,foo:lyr2%29&STARTINDEX=0&COUNT=1&FILTER=%3CFilter%20xmlns%3D%22http:%2F%2Fwww.opengis.net%2Ffes%2F2.0%22%20xmlns:foo%3D%22http:%2F%2Ffoo%22%20xmlns:gml%3D%22http:%2F%2Fwww.opengis.net%2Fgml%2F3.2%22%3E%3CPropertyIsEqualTo%3E%3CValueReference%3Efoo:lyr1%2Fstr%3C%2FValueReference%3E%3CValueReference%3Efoo:lyr2%2Fstr2%3C%2FValueReference%3E%3C%2FPropertyIsEqualTo%3E%3C%2FFilter%3E',
"""<?xml version="1.0" encoding="UTF-8"?>
<wfs:FeatureCollection xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:foo="http://foo"
xmlns:wfs="http://www.opengis.net/wfs/2.0"
xmlns:gml="http://www.opengis.net/gml/3.2"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
numberMatched="unknown" numberReturned="1" timeStamp="2015-01-01T00:00:00.000Z"
xsi:schemaLocation="http://www.opengis.net/gml/3.2 http://schemas.opengis.net/gml/3.2.1/gml.xsd
http://www.opengis.net/wfs/2.0 http://schemas.opengis.net/wfs/2.0/wfs.xsd
http://foo /vsimem/wfs200_endpoint_join?SERVICE=WFS&VERSION=2.0.0&REQUEST=DescribeFeatureType&TYPENAME=lyr1,lyr2">
<wfs:member>
<wfs:Tuple>
<wfs:member>
<foo:lyr1 gml:id="lyr1-100">
<foo:str>123.4</foo:str>
<foo:shape><gml:Point srsName="urn:ogc:def:crs:EPSG::4326" gml:id="bla"><gml:pos>48.5 2.5</gml:pos></gml:Point></foo:shape>
</foo:lyr1>
</wfs:member>
<wfs:member>
<foo:lyr2 gml:id="lyr2-101">
<foo:str2>123.4</foo:str2>
<foo:another_shape><gml:Point srsName="urn:ogc:def:crs:EPSG::4326" gml:id="bla"><gml:pos>49 2</gml:pos></gml:Point></foo:another_shape>
</foo:lyr2>
</wfs:member>
</wfs:Tuple>
</wfs:member>
</wfs:FeatureCollection>
""")
    f = sql_lyr.GetNextFeature()
    if f['lyr1.gml_id'] != 'lyr1-100' or f['lyr1.str'] != '123.4' or \
       f['lyr2.gml_id'] != 'lyr2-101' or f['lyr2.str2'] != '123.4' or \
       f['lyr1.shape'].ExportToWkt() != 'POINT (2.5 48.5)' or \
       f['lyr2.another_shape'].ExportToWkt() != 'POINT (2 49)':
        gdaltest.post_reason('fail')
        f.DumpReadable()
        return 'fail'
    ds.ReleaseResultSet(sql_lyr)

    return 'success'

###############################################################################
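# Test SELECT DISTINCT over a WFS 2.0 join: de-duplication is performed
# client side on the selected columns only.
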
def ogr_wfs_vsimem_wfs200_join_distinct():
    if gdaltest.wfs_drv is None:
        return 'skip'

    gdal.FileFromMemBuffer('/vsimem/wfs200_endpoint_join?SERVICE=WFS&REQUEST=GetCapabilities',
"""<WFS_Capabilities version="2.0.0">
<OperationsMetadata>
<ows:Operation name="GetFeature">
<ows:Constraint name="CountDefault">
<ows:NoValues/>
<ows:DefaultValue>4</ows:DefaultValue>
</ows:Constraint>
</ows:Operation>
<ows:Constraint name="ImplementsResultPaging">
<ows:NoValues/><ows:DefaultValue>TRUE</ows:DefaultValue>
</ows:Constraint>
<ows:Constraint name="ImplementsStandardJoins">
<ows:NoValues/><ows:DefaultValue>TRUE</ows:DefaultValue>
</ows:Constraint>
</OperationsMetadata>
<FeatureTypeList>
<FeatureType xmlns:foo="http://foo">
<Name>foo:lyr1</Name>
<DefaultSRS>urn:ogc:def:crs:EPSG::4326</DefaultSRS>
<ows:WGS84BoundingBox>
<ows:LowerCorner>-180.0 -90.0</ows:LowerCorner>
<ows:UpperCorner>180.0 90.0</ows:UpperCorner>
</ows:WGS84BoundingBox>
</FeatureType>
<FeatureType xmlns:foo="http://foo">
<Name>foo:lyr2</Name>
<DefaultSRS>urn:ogc:def:crs:EPSG::4326</DefaultSRS>
<ows:WGS84BoundingBox>
<ows:LowerCorner>-180.0 -90.0</ows:LowerCorner>
<ows:UpperCorner>180.0 90.0</ows:UpperCorner>
</ows:WGS84BoundingBox>
</FeatureType>
</FeatureTypeList>
</WFS_Capabilities>
""")
    gdal.FileFromMemBuffer('/vsimem/wfs200_endpoint_join?SERVICE=WFS&VERSION=2.0.0&REQUEST=DescribeFeatureType&TYPENAME=foo:lyr1,foo:lyr2',
"""<xsd:schema xmlns:foo="http://foo" xmlns:gml="http://www.opengis.net/gml" xmlns:xsd="http://www.w3.org/2001/XMLSchema" elementFormDefault="qualified" targetNamespace="http://foo">
<xsd:import namespace="http://www.opengis.net/gml" schemaLocation="http://foo/schemas/gml/3.2.1/base/gml.xsd"/>
<xsd:complexType name="lyr1Type">
<xsd:complexContent>
<xsd:extension base="gml:AbstractFeatureType">
<xsd:sequence>
<xsd:element maxOccurs="1" minOccurs="0" name="str" nillable="true" type="xsd:string"/>
<xsd:element maxOccurs="1" minOccurs="0" name="int" nillable="true" type="xsd:int"/>
<xsd:element maxOccurs="1" minOccurs="0" name="int64" nillable="true" type="xsd:long"/>
<xsd:element maxOccurs="1" minOccurs="0" name="double" nillable="true" type="xsd:double"/>
<xsd:element maxOccurs="1" minOccurs="0" name="dt" nillable="true" type="xsd:dateTime"/>
<xsd:element maxOccurs="1" minOccurs="0" name="shape" nillable="true" type="gml:PointPropertyType"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:element name="lyr1" substitutionGroup="gml:_Feature" type="foo:lyr1Type"/>
<xsd:complexType name="lyr2Type">
<xsd:complexContent>
<xsd:extension base="gml:AbstractFeatureType">
<xsd:sequence>
<xsd:element maxOccurs="1" minOccurs="0" name="str2" nillable="true" type="xsd:string"/>
<xsd:element maxOccurs="1" minOccurs="0" name="another_str" nillable="true" type="xsd:string"/>
<xsd:element maxOccurs="1" minOccurs="0" name="another_shape" nillable="true" type="gml:PointPropertyType"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:element name="lyr2" substitutionGroup="gml:_Feature" type="foo:lyr2Type"/>
</xsd:schema>
""")
    gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', 'YES')
    ds = ogr.Open('WFS:/vsimem/wfs200_endpoint_join')
    sql_lyr = ds.ExecuteSQL('SELECT DISTINCT lyr1.str, lyr1.int, lyr1.int64, lyr1.double, lyr1.dt, lyr2.another_shape FROM lyr1 JOIN lyr2 ON lyr1.str = lyr2.str2')
    gdal.FileFromMemBuffer('/vsimem/wfs200_endpoint_join?SERVICE=WFS&VERSION=2.0.0&REQUEST=GetFeature&TYPENAMES=%28foo:lyr1,foo:lyr2%29&STARTINDEX=0&COUNT=4&FILTER=%3CFilter%20xmlns%3D%22http:%2F%2Fwww.opengis.net%2Ffes%2F2.0%22%20xmlns:foo%3D%22http:%2F%2Ffoo%22%20xmlns:gml%3D%22http:%2F%2Fwww.opengis.net%2Fgml%2F3.2%22%3E%3CPropertyIsEqualTo%3E%3CValueReference%3Efoo:lyr1%2Fstr%3C%2FValueReference%3E%3CValueReference%3Efoo:lyr2%2Fstr2%3C%2FValueReference%3E%3C%2FPropertyIsEqualTo%3E%3C%2FFilter%3E',
"""<?xml version="1.0" encoding="UTF-8"?>
<wfs:FeatureCollection xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:foo="http://foo"
xmlns:wfs="http://www.opengis.net/wfs/2.0"
xmlns:gml="http://www.opengis.net/gml/3.2"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
numberMatched="unknown" numberReturned="3" timeStamp="2015-01-01T00:00:00.000Z"
xsi:schemaLocation="http://www.opengis.net/gml/3.2 http://schemas.opengis.net/gml/3.2.1/gml.xsd
http://www.opengis.net/wfs/2.0 http://schemas.opengis.net/wfs/2.0/wfs.xsd
http://foo /vsimem/wfs200_endpoint_join?SERVICE=WFS&VERSION=2.0.0&REQUEST=DescribeFeatureType&TYPENAME=lyr1,lyr2">
<wfs:member>
<wfs:Tuple>
<wfs:member>
<foo:lyr1 gml:id="lyr1-1">
<foo:str>foo</foo:str>
<foo:int>1</foo:int>
<foo:int64>9876543210</foo:int64>
<foo:double>123.4</foo:double>
<foo:dt>2015-04-17T12:34:56Z</foo:dt>
<foo:shape><gml:Point srsName="urn:ogc:def:crs:EPSG::4326" gml:id="bla"><gml:pos>48.5 2.5</gml:pos></gml:Point></foo:shape>
</foo:lyr1>
</wfs:member>
<wfs:member>
<foo:lyr2 gml:id="lyr2-1">
<foo:str2>foo</foo:str2>
<foo:another_str>foo</foo:another_str>
<foo:another_shape><gml:Point srsName="urn:ogc:def:crs:EPSG::4326" gml:id="bla"><gml:pos>49 2</gml:pos></gml:Point></foo:another_shape>
</foo:lyr2>
</wfs:member>
</wfs:Tuple>
</wfs:member>
<wfs:member>
<wfs:Tuple>
<wfs:member>
<foo:lyr1 gml:id="lyr1-1">
<foo:str>foo</foo:str>
<foo:int>1</foo:int>
<foo:int64>9876543210</foo:int64>
<foo:double>123.4</foo:double>
<foo:dt>2015-04-17T12:34:56Z</foo:dt>
<foo:shape><gml:Point srsName="urn:ogc:def:crs:EPSG::4326" gml:id="bla"><gml:pos>48.5 2.5</gml:pos></gml:Point></foo:shape>
</foo:lyr1>
</wfs:member>
<wfs:member>
<foo:lyr2 gml:id="lyr2-2">
<foo:str2>foo</foo:str2>
<foo:another_str>bar</foo:another_str>
<foo:another_shape><gml:Point srsName="urn:ogc:def:crs:EPSG::4326" gml:id="bla"><gml:pos>49 2</gml:pos></gml:Point></foo:another_shape>
</foo:lyr2>
</wfs:member>
</wfs:Tuple>
</wfs:member>
<wfs:member>
<wfs:Tuple>
<wfs:member>
<foo:lyr1 gml:id="lyr1-2">
<foo:str>bar</foo:str>
<foo:int>1</foo:int>
<foo:int64>9876543210</foo:int64>
<foo:double>123.4</foo:double>
<foo:dt>2015-04-17T12:34:56Z</foo:dt>
<foo:shape><gml:Point srsName="urn:ogc:def:crs:EPSG::4326" gml:id="bla"><gml:pos>48.5 2.5</gml:pos></gml:Point></foo:shape>
</foo:lyr1>
</wfs:member>
<wfs:member>
<foo:lyr2 gml:id="lyr2-3">
<foo:str2>bar</foo:str2>
<foo:another_str>bar</foo:another_str>
<foo:another_shape><gml:Point srsName="urn:ogc:def:crs:EPSG::4326" gml:id="bla"><gml:pos>49 2</gml:pos></gml:Point></foo:another_shape>
</foo:lyr2>
</wfs:member>
</wfs:Tuple>
</wfs:member>
</wfs:FeatureCollection>
""")
    if sql_lyr.GetFeatureCount() != 2:
        gdaltest.post_reason('fail')
        return 'fail'
    ds.ReleaseResultSet(sql_lyr)

    return 'success'

###############################################################################
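# Cleanup: disable the vsimem HTTP hook and remove all in-memory files.
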
def ogr_wfs_vsimem_cleanup():
    if gdaltest.wfs_drv is None:
        return 'skip'

    gdal.SetConfigOption('CPL_CURL_ENABLE_VSIMEM', None)

    for f in gdal.ReadDir('/vsimem/'):
        gdal.Unlink('/vsimem/' + f)

    return 'success'

gdaltest_live_list = [
    #ogr_wfs_mapserver,
    #ogr_wfs_geoserver, #FIXME: reenable after adapting test
    #ogr_wfs_geoserver_json, #FIXME: reenable after adapting test
    #ogr_wfs_geoserver_shapezip, #FIXME: reenable after adapting test
    #ogr_wfs_geoserver_paging, #FIXME: reenable after adapting test
    ogr_wfs_deegree,
    #ogr_wfs_test_ogrsf,
    ogr_wfs_fake_wfs_server,
    #ogr_wfs_geoserver_wfst, #FIXME: reenable after adapting test
    #ogr_wfs_deegree_wfst,
    #ogr_wfs_ionic_wfst,
    #ogr_wfs_ionic_sql,
    ogr_wfs_xmldescriptionfile,
    #ogr_wfs_xmldescriptionfile_to_be_updated, #FIXME: reenable after adapting test
    ogr_wfs_getcapabilitiesfile,
    ogr_wfs_deegree_gml321,
    ogr_wfs_deegree_wfs200,
    ogr_wfs_deegree_sortby,
    #ogr_wfs_esri,
    ogr_wfs_esri_2,
    ogr_wfs_cubewerx,
    ogr_wfs_tinyows,
    ogr_wfs_erdas_apollo,
    ogr_wfs_intergraph,
    ogr_wfs_mapinfo,
    ogr_wfs_turn_streaming_off,
    ogr_wfs_deegree,
    #ogr_wfs_test_ogrsf,
]

gdaltest_vsimem_list = [
    ogr_wfs_vsimem_fail_because_not_enabled,
    ogr_wfs_vsimem_fail_because_no_get_capabilities,
    ogr_wfs_vsimem_fail_because_empty_response,
    ogr_wfs_vsimem_fail_because_no_WFS_Capabilities,
    ogr_wfs_vsimem_fail_because_exception,
    ogr_wfs_vsimem_fail_because_invalid_xml_capabilities,
    ogr_wfs_vsimem_fail_because_missing_featuretypelist,
    ogr_wfs_vsimem_wfs110_open_getcapabilities_file,
    ogr_wfs_vsimem_wfs110_minimal_instance,
    ogr_wfs_vsimem_wfs110_one_layer_missing_describefeaturetype,
    ogr_wfs_vsimem_wfs110_one_layer_invalid_describefeaturetype,
    ogr_wfs_vsimem_wfs110_one_layer_describefeaturetype_missing_schema,
    ogr_wfs_vsimem_wfs110_one_layer_describefeaturetype,
    ogr_wfs_vsimem_wfs110_one_layer_xmldescriptionfile_to_be_updated,
    ogr_wfs_vsimem_wfs110_one_layer_missing_getfeaturecount_no_hits,
    ogr_wfs_vsimem_wfs110_one_layer_missing_getfeaturecount_with_hits,
    ogr_wfs_vsimem_wfs110_one_layer_invalid_getfeaturecount_with_hits,
    ogr_wfs_vsimem_wfs110_one_layer_getfeaturecount_with_hits_missing_FeatureCollection,
    ogr_wfs_vsimem_wfs110_one_layer_getfeaturecount_with_hits_invalid_xml,
    ogr_wfs_vsimem_wfs110_one_layer_getfeaturecount_with_hits_ServiceExceptionReport,
    ogr_wfs_vsimem_wfs110_one_layer_getfeaturecount_with_hits_missing_numberOfFeatures,
    ogr_wfs_vsimem_wfs110_one_layer_getfeaturecount_with_hits,
    ogr_wfs_vsimem_wfs110_one_layer_missing_getfeature,
    ogr_wfs_vsimem_wfs110_one_layer_invalid_getfeature,
    ogr_wfs_vsimem_wfs110_one_layer_exception_getfeature,
    ogr_wfs_vsimem_wfs110_one_layer_getfeature,
    ogr_wfs_vsimem_wfs110_one_layer_getextent,
    ogr_wfs_vsimem_wfs110_one_layer_getextent_without_getfeature,
    ogr_wfs_vsimem_wfs110_one_layer_getextent_optimized,
    ogr_wfs_vsimem_wfs110_one_layer_getfeature_ogr_getfeature,
    ogr_wfs_vsimem_wfs110_one_layer_filter_gml_id_failed,
    ogr_wfs_vsimem_wfs110_one_layer_filter_gml_id_success,
    ogr_wfs_vsimem_wfs110_one_layer_filter,
    ogr_wfs_vsimem_wfs110_one_layer_filter_spatial_ops,
    ogr_wfs_vsimem_wfs110_one_layer_spatial_filter,
    ogr_wfs_vsimem_wfs110_one_layer_spatial_filter_and_attribute_filter,
    ogr_wfs_vsimem_wfs110_insertfeature,
    ogr_wfs_vsimem_wfs110_updatefeature,
    ogr_wfs_vsimem_wfs110_deletefeature,
    ogr_wfs_vsimem_wfs110_schema_not_understood,
    ogr_wfs_vsimem_wfs110_multiple_layers,
    ogr_wfs_vsimem_wfs110_multiple_layers_same_name_different_ns,
    ogr_wfs_vsimem_wfs200_paging,
    ogr_wfs_vsimem_wfs200_json,
    ogr_wfs_vsimem_wfs200_multipart,
    ogr_wfs_vsimem_wfs200_join,
    ogr_wfs_vsimem_wfs200_join_layer_with_namespace_prefix,
    ogr_wfs_vsimem_wfs200_join_distinct,
    ogr_wfs_vsimem_cleanup,
]

gdaltest_list = [ogr_wfs_init]
gdaltest_list += gdaltest_vsimem_list
gdaltest_list += [ogr_wfs_turn_streaming_off]
gdaltest_list += gdaltest_vsimem_list
gdaltest_list += [ogr_wfs_turn_streaming_on]
gdaltest_list += gdaltest_live_list

if __name__ == '__main__':
    gdaltest.setup_run('ogr_wfs')
    gdaltest.run_tests(gdaltest_list)
    gdaltest.summarize()
[quality-signal columns for this row omitted: numeric metrics only]

--- next row ---
hexsha: 72c85179736b9b8dff41206b2e250153177eb9b5 | size: 4,855 | ext: py | lang: Python
path: scripts/options.py | repo: tomalrussell/pytal @ b4211277b59320300ba82376ce9c033d83cbafd4 | licenses: [MIT]
"""
Options consisting of scenarios and strategies.
Written by Ed Oughton
January 2020
#strategy is generation_core_backhaul_sharing
"""
OPTIONS = {
'technology_options': [
{
'scenario': 'S1_30',
'strategy': '4G_epc_microwave_baseline',
'frequencies': [
{'frequency': 800, 'bandwidth': 10},
{'frequency': 2600, 'bandwidth': 10},
],
},
{
'scenario': 'S2_50',
'strategy': '4G_epc_microwave_baseline',
'frequencies': [
{'frequency': 800, 'bandwidth': 10},
{'frequency': 2600, 'bandwidth': 10},
],
},
{
'scenario': 'S3_200',
'strategy': '4G_epc_microwave_baseline',
'frequencies': [
{'frequency': 800, 'bandwidth': 10},
{'frequency': 2600, 'bandwidth': 10},
],
},
{
'scenario': 'S1_30',
'strategy': '5G_nsa_microwave_baseline',
'frequencies': [
{'frequency': 700, 'bandwidth': 10},
{'frequency': 3500, 'bandwidth': 40},
],
},
{
'scenario': 'S2_50',
'strategy': '5G_nsa_microwave_baseline',
'frequencies': [
{'frequency': 700, 'bandwidth': 10},
{'frequency': 3500, 'bandwidth': 40},
],
},
{
'scenario': 'S3_200',
'strategy': '5G_nsa_microwave_baseline',
'frequencies': [
{'frequency': 700, 'bandwidth': 10},
{'frequency': 3500, 'bandwidth': 40},
],
},
{
'scenario': 'S1_30',
'strategy': '5G_sa_fiber_baseline',
'frequencies': [
{'frequency': 700, 'bandwidth': 10},
{'frequency': 3500, 'bandwidth': 40},
],
},
{
'scenario': 'S2_50',
'strategy': '5G_sa_fiber_baseline',
'frequencies': [
{'frequency': 700, 'bandwidth': 10},
{'frequency': 3500, 'bandwidth': 40},
],
},
{
'scenario': 'S3_200',
'strategy': '5G_sa_fiber_baseline',
'frequencies': [
{'frequency': 700, 'bandwidth': 10},
{'frequency': 3500, 'bandwidth': 40},
],
},
],
'business_model_options': [
{
'scenario': 'S1_30',
'strategy': '5G_nsa_microwave_baseline',
'frequencies': [
{'frequency': 700, 'bandwidth': 10},
{'frequency': 3500, 'bandwidth': 40},
],
},
{
'scenario': 'S2_50',
'strategy': '5G_nsa_microwave_baseline',
'frequencies': [
{'frequency': 700, 'bandwidth': 10},
{'frequency': 3500, 'bandwidth': 40},
],
},
{
'scenario': 'S3_200',
'strategy': '5G_nsa_microwave_baseline',
'frequencies': [
{'frequency': 700, 'bandwidth': 10},
{'frequency': 3500, 'bandwidth': 40},
],
},
{
'scenario': 'S1_30',
'strategy': '5G_nsa_microwave_passive',
'frequencies': [
{'frequency': 700, 'bandwidth': 10},
{'frequency': 3500, 'bandwidth': 40},
],
},
{
'scenario': 'S2_50',
'strategy': '5G_nsa_microwave_passive',
'frequencies': [
{'frequency': 700, 'bandwidth': 10},
{'frequency': 3500, 'bandwidth': 40},
],
},
{
'scenario': 'S3_200',
'strategy': '5G_nsa_microwave_passive',
'frequencies': [
{'frequency': 700, 'bandwidth': 10},
{'frequency': 3500, 'bandwidth': 40},
],
},
{
'scenario': 'S1_30',
'strategy': '5G_nsa_microwave_active',
'frequencies': [
{'frequency': 700, 'bandwidth': 10},
{'frequency': 3500, 'bandwidth': 40},
],
},
{
'scenario': 'S2_50',
'strategy': '5G_nsa_microwave_active',
'frequencies': [
{'frequency': 700, 'bandwidth': 10},
{'frequency': 3500, 'bandwidth': 40},
],
},
{
'scenario': 'S3_200',
'strategy': '5G_nsa_microwave_active',
'frequencies': [
{'frequency': 700, 'bandwidth': 10},
{'frequency': 3500, 'bandwidth': 40},
],
},
],
}
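
# A minimal usage sketch (an assumption, not part of the original file):
# each OPTIONS entry pairs a demand scenario with a deployment strategy and
# its spectrum portfolio, so downstream code can iterate over an option set.
if __name__ == '__main__':
    for option in OPTIONS['technology_options']:
        bands = ', '.join(
            '{frequency} MHz ({bandwidth} MHz)'.format(**f)
            for f in option['frequencies']
        )
        print(option['scenario'], option['strategy'], bands)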
| 30.15528 | 53 | 0.414624 | 330 | 4,855 | 5.863636 | 0.142424 | 0.11938 | 0.186047 | 0.248062 | 0.923514 | 0.910078 | 0.910078 | 0.910078 | 0.910078 | 0.910078 | 0 | 0.100937 | 0.42863 | 4,855 | 160 | 54 | 30.34375 | 0.596611 | 0.026982 | 0 | 0.733333 | 0 | 0 | 0.359703 | 0.082291 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.02 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
f410c12b329ce2cec923f28510627a0e9edf25cc | 8,519 | py | Python | pymofscreen/default_calculators.py | faradaymahe/py-mof_screen | 2c034160abfba6e1f75947c0b674da1fd1ad216f | ["MIT"] | 16 | 2019-01-27T18:00:58.000Z | 2022-03-15T01:19:05.000Z | pymofscreen/default_calculators.py | faradaymahe/py-mof_screen | 2c034160abfba6e1f75947c0b674da1fd1ad216f | ["MIT"] | 4 | 2018-11-29T06:19:55.000Z | 2021-12-09T05:03:22.000Z | pymofscreen/default_calculators.py | faradaymahe/py-mof_screen | 2c034160abfba6e1f75947c0b674da1fd1ad216f | ["MIT"] | 6 | 2019-01-03T21:31:23.000Z | 2021-12-09T01:21:05.000Z |
from ase.calculators.vasp import Vasp
#default parameters for calculators
defaults = {
'xc': 'PBE',
'ivdw': 12,
'encut': 520,
'prec': 'Accurate',
'algo': 'All',
'ediff': 1e-4,
'nelm': 150,
'nelmin': 3,
'lreal': False,
'ismear': 0,
'sigma': 0.01,
'nsw': 500,
'ediffg': -0.03,
'lorbit': 11,
'isym': 0,
'symprec': 1e-8,
'setups': {'base':'recommended','Li':'','W':'_sv','Yb':'_3','Eu':'_3'},
'ldau_luj': None,
'lasph': False,
'nupdown': -1,
'nedos': 3000
}
def calcs(calc_name):
"""
Define the default calculators for relaxations
Note: it should not include the kpts, gamma, or images keywords!
Args:
calc_name (string): name of calculator
Returns:
calc (dict): ASE Vasp calculator dictionary
"""
if calc_name == 'scf_test':
if 'xc_start' not in defaults:
defaults['xc_start'] = defaults['xc']
calc = Vasp(
xc=defaults['xc_start'],
setups=defaults['setups'],
ivdw=defaults['ivdw'],
prec=defaults['prec'],
algo=defaults['algo'],
ediff=defaults['ediff'],
nelm=defaults['nelm']*2,
nelmin=defaults['nelmin'],
lreal=defaults['lreal'],
ismear=defaults['ismear'],
sigma=defaults['sigma'],
lcharg=False,
lwave=True,
lorbit=defaults['lorbit'],
isym=defaults['isym'],
symprec=defaults['symprec'],
nsw=0,
istart=0,
ldau_luj=defaults['ldau_luj'],
lasph=defaults['lasph'],
nupdown=defaults['nupdown']
)
elif calc_name == 'ase_bfgs':
calc = Vasp(
xc=defaults['xc'],
setups=defaults['setups'],
ivdw=defaults['ivdw'],
prec=defaults['prec'],
algo=defaults['algo'],
ediff=defaults['ediff'],
        nelm=int(defaults['nelm']*1.5),  # cast to int: NELM is an integer INCAR tag
nelmin=defaults['nelmin'],
lreal=defaults['lreal'],
ismear=defaults['ismear'],
sigma=defaults['sigma'],
lcharg=False,
lwave=True,
lorbit=defaults['lorbit'],
isym=defaults['isym'],
symprec=defaults['symprec'],
ldau_luj=defaults['ldau_luj'],
lasph=defaults['lasph'],
nupdown=defaults['nupdown']
)
elif calc_name == 'isif2_lowacc':
calc = Vasp(
xc=defaults['xc'],
setups=defaults['setups'],
ivdw=defaults['ivdw'],
prec=defaults['prec'],
algo=defaults['algo'],
ediff=defaults['ediff'],
nelm=defaults['nelm'],
nelmin=defaults['nelmin'],
lreal=defaults['lreal'],
ismear=defaults['ismear'],
sigma=defaults['sigma'],
lcharg=False,
lwave=True,
ibrion=2,
isif=2,
nsw=250,
ediffg=-0.05,
lorbit=defaults['lorbit'],
isym=defaults['isym'],
symprec=defaults['symprec'],
ldau_luj=defaults['ldau_luj'],
lasph=defaults['lasph'],
nupdown=defaults['nupdown']
)
elif calc_name == 'isif2_medacc':
calc = Vasp(
xc=defaults['xc'],
setups=defaults['setups'],
ivdw=defaults['ivdw'],
prec=defaults['prec'],
algo=defaults['algo'],
ediff=defaults['ediff'],
nelm=defaults['nelm'],
nelmin=8,
lreal=defaults['lreal'],
ismear=defaults['ismear'],
sigma=defaults['sigma'],
lcharg=False,
lwave=True,
ibrion=3,
iopt=7,
potim=0,
isif=2,
nsw=defaults['nsw'],
ediffg=-0.05,
lorbit=defaults['lorbit'],
isym=defaults['isym'],
symprec=defaults['symprec'],
ldau_luj=defaults['ldau_luj'],
lasph=defaults['lasph'],
nupdown=defaults['nupdown']
)
elif calc_name == 'isif2_highacc':
calc = Vasp(
xc=defaults['xc'],
setups=defaults['setups'],
encut=defaults['encut'],
ivdw=defaults['ivdw'],
prec=defaults['prec'],
algo=defaults['algo'],
ediff=1e-6,
nelm=defaults['nelm'],
nelmin=8,
lreal=defaults['lreal'],
ismear=defaults['ismear'],
sigma=defaults['sigma'],
lcharg=False,
lwave=True,
ibrion=3,
iopt=7,
potim=0,
isif=2,
nsw=defaults['nsw'],
ediffg=defaults['ediffg'],
lorbit=defaults['lorbit'],
isym=defaults['isym'],
symprec=defaults['symprec'],
ldau_luj=defaults['ldau_luj'],
lasph=defaults['lasph'],
nupdown=defaults['nupdown']
)
elif calc_name == 'isif3_lowacc':
calc = Vasp(
xc=defaults['xc'],
setups=defaults['setups'],
encut=defaults['encut'],
ivdw=defaults['ivdw'],
prec=defaults['prec'],
algo=defaults['algo'],
ediff=1e-6,
nelm=defaults['nelm'],
nelmin=defaults['nelmin'],
lreal=defaults['lreal'],
ismear=defaults['ismear'],
sigma=defaults['sigma'],
lcharg=False,
lwave=True,
ibrion=2,
isif=3,
nsw=30,
ediffg=defaults['ediffg'],
lorbit=defaults['lorbit'],
isym=defaults['isym'],
symprec=defaults['symprec'],
ldau_luj=defaults['ldau_luj'],
lasph=defaults['lasph'],
nupdown=defaults['nupdown']
)
elif calc_name == 'isif3_highacc':
calc = Vasp(
xc=defaults['xc'],
setups=defaults['setups'],
encut=defaults['encut'],
ivdw=defaults['ivdw'],
prec=defaults['prec'],
algo=defaults['algo'],
ediff=1e-6,
nelm=defaults['nelm'],
nelmin=defaults['nelmin'],
lreal=defaults['lreal'],
ismear=defaults['ismear'],
sigma=defaults['sigma'],
lcharg=False,
lwave=True,
ibrion=2,
isif=3,
nsw=30,
ediffg=defaults['ediffg'],
lorbit=defaults['lorbit'],
isym=defaults['isym'],
symprec=defaults['symprec'],
ldau_luj=defaults['ldau_luj'],
lasph=defaults['lasph'],
nupdown=defaults['nupdown']
)
elif calc_name == 'final_spe':
calc = Vasp(
xc=defaults['xc'],
setups=defaults['setups'],
encut=defaults['encut'],
ivdw=defaults['ivdw'],
prec=defaults['prec'],
algo=defaults['algo'],
ediff=1e-6,
        nelm=int(defaults['nelm']*1.5),  # cast to int: NELM is an integer INCAR tag
lreal=False,
ismear=defaults['ismear'],
sigma=defaults['sigma'],
lcharg=True,
laechg=True,
lwave=True,
nsw=0,
lorbit=defaults['lorbit'],
isym=defaults['isym'],
symprec=defaults['symprec'],
addgrid=False,
ldau_luj=defaults['ldau_luj'],
lasph=defaults['lasph'],
nupdown=defaults['nupdown'],
nedos=defaults['nedos']
)
elif calc_name == 'cineb_lowacc':
calc = Vasp(
xc=defaults['xc'],
setups=defaults['setups'],
ivdw=defaults['ivdw'],
prec=defaults['prec'],
algo=defaults['algo'],
ediff=1e-6,
nelm=100,
nelmin=defaults['nelmin'],
lreal=defaults['lreal'],
ismear=defaults['ismear'],
sigma=defaults['sigma'],
lcharg=False,
lwave=True,
ibrion=3,
potim=0,
iopt=1,
nsw=defaults['nsw'],
ediffg=-0.1,
lclimb=True,
lorbit=defaults['lorbit'],
isym=defaults['isym'],
symprec=defaults['symprec'],
ichain=0,
ldau_luj=defaults['ldau_luj'],
lasph=defaults['lasph'],
nupdown=defaults['nupdown']
)
elif calc_name == 'dimer_lowacc':
calc = Vasp(
xc=defaults['xc'],
setups=defaults['setups'],
ivdw=defaults['ivdw'],
prec=defaults['prec'],
algo=defaults['algo'],
ediff=1e-8,
nelm=defaults['nelm'],
nelmin=defaults['nelmin'],
lreal=defaults['lreal'],
ismear=defaults['ismear'],
sigma=defaults['sigma'],
lcharg=False,
lwave=True,
ibrion=3,
potim=0,
iopt=7,
nsw=defaults['nsw']*4,
ediffg=-0.075,
lorbit=defaults['lorbit'],
isym=defaults['isym'],
symprec=defaults['symprec'],
ichain=2,
ldau_luj=defaults['ldau_luj'],
lasph=defaults['lasph'],
nupdown=defaults['nupdown']
)
elif calc_name == 'dimer_medacc':
calc = Vasp(
xc=defaults['xc'],
setups=defaults['setups'],
ivdw=defaults['ivdw'],
prec=defaults['prec'],
algo=defaults['algo'],
ediff=1e-8,
nelm=defaults['nelm'],
nelmin=defaults['nelmin'],
lreal=defaults['lreal'],
ismear=defaults['ismear'],
sigma=defaults['sigma'],
lcharg=False,
lwave=True,
ibrion=3,
potim=0,
iopt=7,
nsw=defaults['nsw']*2,
ediffg=defaults['ediffg'],
lorbit=defaults['lorbit'],
isym=defaults['isym'],
symprec=defaults['symprec'],
ichain=2,
ldau_luj=defaults['ldau_luj'],
lasph=defaults['lasph'],
nupdown=defaults['nupdown']
)
elif calc_name == 'dimer_highacc':
calc = Vasp(
xc=defaults['xc'],
encut=defaults['encut'],
setups=defaults['setups'],
ivdw=defaults['ivdw'],
prec=defaults['prec'],
algo=defaults['algo'],
ediff=1e-8,
nelm=defaults['nelm'],
nelmin=defaults['nelmin'],
lreal=defaults['lreal'],
ismear=defaults['ismear'],
sigma=defaults['sigma'],
lcharg=False,
lwave=True,
ibrion=3,
potim=0,
iopt=7,
nsw=defaults['nsw']*2,
ediffg=defaults['ediffg'],
lorbit=defaults['lorbit'],
isym=defaults['isym'],
symprec=defaults['symprec'],
ichain=2,
ldau_luj=defaults['ldau_luj'],
lasph=defaults['lasph'],
nupdown=defaults['nupdown']
)
else:
raise ValueError('Out of range for calculators')
return calc
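
# A minimal usage sketch (an assumption, not part of the original module):
# look up a preset ASE Vasp calculator by name and attach it to a structure.
# The Cu bulk cell and the k-point settings below are illustrative only;
# calcs() deliberately leaves kpts/gamma out, so they are set afterwards.
if __name__ == '__main__':
    from ase.build import bulk
    atoms = bulk('Cu')                      # hypothetical test structure
    calc = calcs('scf_test')                # pick one of the presets above
    calc.set(kpts=(2, 2, 2), gamma=True)    # kpts/gamma are intentionally not set by calcs()
    atoms.calc = calc                       # running it requires a working VASP install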
| 23.275956 | 72 | 0.630121 | 1,033 | 8,519 | 5.141336 | 0.120039 | 0.03295 | 0.022595 | 0.04067 | 0.863303 | 0.85483 | 0.849746 | 0.841461 | 0.841461 | 0.829411 | 0 | 0.016875 | 0.172203 | 8,519 | 366 | 73 | 23.275956 | 0.736245 | 0.031576 | 0 | 0.798867 | 0 | 0 | 0.170179 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.002833 | false | 0 | 0.002833 | 0 | 0.008499 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
f42b8f64454354829eca4f069154ddc0ce595835 | 2,823 | py | Python | xldigest/widgets/dialogs.py | hammerheadlemon/xldigest | f5130121de5e1b152aa42bdd29ffd57cff6b5733 | ["MIT"] | null | null | null | xldigest/widgets/dialogs.py | hammerheadlemon/xldigest | f5130121de5e1b152aa42bdd29ffd57cff6b5733 | ["MIT"] | null | null | null | xldigest/widgets/dialogs.py | hammerheadlemon/xldigest | f5130121de5e1b152aa42bdd29ffd57cff6b5733 | ["MIT"] | null | null | null |
from PyQt5 import QtWidgets
class AddPortfolioDialog(QtWidgets.QDialog):
def __init__(self, parent=None):
super().__init__(parent)
name_label = QtWidgets.QLabel("Portfolio Name")
self.name_lineEdit = QtWidgets.QLineEdit("Portfolio Name")
buttonBox = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Ok |
QtWidgets.QDialogButtonBox.Cancel)
grid = QtWidgets.QGridLayout()
grid.addWidget(name_label, 0, 0)
grid.addWidget(self.name_lineEdit, 0, 1)
grid.addWidget(buttonBox, 2, 0, 1, 2)
self.setLayout(grid)
buttonBox.accepted.connect(self.accept)
buttonBox.rejected.connect(self.reject)
class AddProjectDialog(QtWidgets.QDialog):
def __init__(self, parent=None):
super().__init__(parent)
name_label = QtWidgets.QLabel("Project Name")
self.name_lineEdit = QtWidgets.QLineEdit("Project Name")
buttonBox = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Ok |
QtWidgets.QDialogButtonBox.Cancel)
grid = QtWidgets.QGridLayout()
grid.addWidget(name_label, 0, 0)
grid.addWidget(self.name_lineEdit, 1, 1)
grid.addWidget(buttonBox, 2, 0, 1, 2)
self.setLayout(grid)
buttonBox.accepted.connect(self.accept)
buttonBox.rejected.connect(self.reject)
class AddSeriesDialog(QtWidgets.QDialog):
def __init__(self, parent=None):
super().__init__(parent)
name_label = QtWidgets.QLabel("Series Name")
self.name_lineEdit = QtWidgets.QLineEdit("Series Name")
buttonBox = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Ok |
QtWidgets.QDialogButtonBox.Cancel)
grid = QtWidgets.QGridLayout()
grid.addWidget(name_label, 0, 0)
grid.addWidget(self.name_lineEdit, 0, 1)
grid.addWidget(buttonBox, 2, 0, 1, 2)
self.setLayout(grid)
buttonBox.accepted.connect(self.accept)
buttonBox.rejected.connect(self.reject)
class AddSeriesItemDialog(QtWidgets.QDialog):
def __init__(self, parent=None):
super().__init__(parent)
name_label = QtWidgets.QLabel("Series Item Name")
self.name_lineEdit = QtWidgets.QLineEdit("Series Item Name")
buttonBox = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Ok |
QtWidgets.QDialogButtonBox.Cancel)
grid = QtWidgets.QGridLayout()
grid.addWidget(name_label, 0, 0)
grid.addWidget(self.name_lineEdit, 1, 1)
grid.addWidget(buttonBox, 2, 0, 1, 2)
self.setLayout(grid)
buttonBox.accepted.connect(self.accept)
buttonBox.rejected.connect(self.reject)
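
# A minimal usage sketch (an assumption, not part of the original module):
# run one of the dialogs modally and read back the entered name on accept.
if __name__ == '__main__':
    import sys
    app = QtWidgets.QApplication(sys.argv)
    dialog = AddPortfolioDialog()
    if dialog.exec_() == QtWidgets.QDialog.Accepted:
        print(dialog.name_lineEdit.text())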
| 42.134328 | 81 | 0.646121 | 286 | 2,823 | 6.20979 | 0.143357 | 0.168919 | 0.072072 | 0.051802 | 0.92286 | 0.92286 | 0.880068 | 0.830518 | 0.830518 | 0.830518 | 0 | 0.015655 | 0.253277 | 2,823 | 66 | 82 | 42.772727 | 0.82685 | 0 | 0 | 0.77193 | 0 | 0 | 0.037562 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.070175 | false | 0 | 0.017544 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
f47cc24bebf5c002e908cc662cdf5cdc5694bf1c | 252 | py | Python | LPII/Aula 08/dns.py | agAlexandre/Faculdade | d52a7b247195973990c8ec7b11f7693de2264730 | ["Apache-2.0"] | null | null | null | LPII/Aula 08/dns.py | agAlexandre/Faculdade | d52a7b247195973990c8ec7b11f7693de2264730 | ["Apache-2.0"] | null | null | null | LPII/Aula 08/dns.py | agAlexandre/Faculdade | d52a7b247195973990c8ec7b11f7693de2264730 | ["Apache-2.0"] | null | null | null |
class DNS():
    def __init__(self):
        # maps a server name to the server registered under it
        self.dicServidores = {}

    def cadastrarServidor(self, nome, servidor):
        # register ("cadastrar") a server under the given name
        self.dicServidores[nome] = servidor

    def consultarServidor(self, nome):
        # look up ("consultar") a server by name; raises KeyError if absent
        return self.dicServidores[nome]
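
# A minimal usage sketch (an assumption, not part of the original file):
if __name__ == '__main__':
    dns = DNS()
    dns.cadastrarServidor('www.example.com', '93.184.216.34')
    print(dns.consultarServidor('www.example.com'))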
| 25.2 | 49 | 0.634921 | 24 | 252 | 6.5 | 0.458333 | 0.326923 | 0.269231 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.261905 | 252 | 9 | 50 | 28 | 0.83871 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.428571 | false | 0 | 0 | 0.142857 | 0.714286 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 7 |
be3cde3d4d063a5465bcdb56674ce8a1dd68641c | 305 | py | Python | tests/parser/range.13.test.py | veltri/DLV2 | 944aaef803aa75e7ec51d7e0c2b0d964687fdd0e | ["Apache-2.0"] | null | null | null | tests/parser/range.13.test.py | veltri/DLV2 | 944aaef803aa75e7ec51d7e0c2b0d964687fdd0e | ["Apache-2.0"] | null | null | null | tests/parser/range.13.test.py | veltri/DLV2 | 944aaef803aa75e7ec51d7e0c2b0d964687fdd0e | ["Apache-2.0"] | null | null | null |
input = """
% Range facts for truely negative predicates.
% small(0..12).
%-small(13..19).
% Ensure inconsistency is caught!
small(13).
"""
output = """
% Range facts for truely negative predicates.
% small(0..12).
%-small(13..19).
% Ensure inconsistency is caught!
small(13).
"""
| 14.52381 | 46 | 0.613115 | 38 | 305 | 4.921053 | 0.447368 | 0.149733 | 0.139037 | 0.203209 | 0.941176 | 0.941176 | 0.941176 | 0.941176 | 0.941176 | 0.941176 | 0 | 0.074689 | 0.209836 | 305 | 20 | 47 | 15.25 | 0.701245 | 0 | 0 | 0.857143 | 0 | 0 | 0.892734 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |