hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4c44a5475972c8b18b4e13064d3de052dc79e744
| 333
|
py
|
Python
|
construct_ui/forms/__init__.py
|
construct-org/construct_ui
|
933a73270ec46704f1ddaf4fabe4947373d8d8a2
|
[
"MIT"
] | null | null | null |
construct_ui/forms/__init__.py
|
construct-org/construct_ui
|
933a73270ec46704f1ddaf4fabe4947373d8d8a2
|
[
"MIT"
] | 4
|
2018-05-07T16:02:53.000Z
|
2018-05-21T15:04:56.000Z
|
construct_ui/forms/__init__.py
|
construct-org/construct_ui
|
933a73270ec46704f1ddaf4fabe4947373d8d8a2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from construct_ui.forms.actionform import ActionForm
from construct_ui.forms.fileopen import FileOpenForm
from construct_ui.forms.filesave import FileSaveForm
from construct_ui.forms.publish import PublishForm
from construct_ui.forms.workspaces import SetWorkspaceForm
| 37
| 58
| 0.852853
| 43
| 333
| 6.372093
| 0.44186
| 0.237226
| 0.273723
| 0.364964
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0033
| 0.09009
| 333
| 8
| 59
| 41.625
| 0.90099
| 0.063063
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
910d80cd894afe104d3222d5c81887738e5f3d18
| 9,980
|
py
|
Python
|
blockchain/all_challenges/2021/realworld/rwctf3rd-Re-Montagy/deploy/ethbot/src/deployer/main.py
|
yuxingzh/CTF
|
2d58d8443628b33958cafc65813c4719e78e6ee7
|
[
"MIT"
] | null | null | null |
blockchain/all_challenges/2021/realworld/rwctf3rd-Re-Montagy/deploy/ethbot/src/deployer/main.py
|
yuxingzh/CTF
|
2d58d8443628b33958cafc65813c4719e78e6ee7
|
[
"MIT"
] | null | null | null |
blockchain/all_challenges/2021/realworld/rwctf3rd-Re-Montagy/deploy/ethbot/src/deployer/main.py
|
yuxingzh/CTF
|
2d58d8443628b33958cafc65813c4719e78e6ee7
|
[
"MIT"
] | null | null | null |
from src.utils.auth import get_acc_nonce
from src.utils.prettyprint.Red import Printer, Processor, Formator
from src.utils.compiler import comp
from src.utils.utils import randhex
from src.banner.text.corpus import DEPLOY_SUCCESS_CELEBRATION
from src.deployer import Montagy
from src.interface import IMontagy
import re
def finish_deploy(montage_address):
Printer.ppln(DEPLOY_SUCCESS_CELEBRATION)
#return bytes.fromhex(montage_address.split("0x")[1])
return montage_address.split("0x")[1]
def run(ctx, _acct):
p = Printer()
processor = Processor(40)
formator = Formator()
#p.ppln(formator.in_all_left("[-] loading deploy cache..."))
#p.ppln("[*] loading deploy cache...")
ctx = comp(ctx)
processor.update(3)
acc_nonce = get_acc_nonce(ctx, _acct)
processor.update(2)
# 1 stage contracts deploy
err, montagy_deploy_txhash = Montagy.deploy(ctx, _acct, acc_nonce)
processor.update(5)
montage_address = Montagy.review(ctx, _acct, montagy_deploy_txhash)
processor.update(5)
# 2 stage P1 P2 deploy
# input_p1code = "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000740608060405234801561001057600080fd5b50336000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506106e0806100606000396000f3fe608060405234801561001057600080fd5b50600436106100575760003560e01c806319ff1d211461005c5780635d831619146100df5780637b76ac911461016c57806381a20b0a1461018a578063fd922a421461040a575b600080fd5b610064610454565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156100a4578082015181840152602081019050610089565b50505050905090810190601f1680156100d15780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b61015260048036036101008110156100f657600080fd5b810190808035906020019092919080359060200190929190803590602001909291908035906020019092919080359060200190929190803590602001909291908035906020019092919080359060200190929190505050610491565b604051808215151515815260200191505060405180910390f35b6101746104c1565b6040518082815260200191505060405180910390f35b610408600480360360808110156101a057600080fd5b81019080803590602001906401000000008111156101bd57600080fd5b8201836020820111156101cf57600080fd5b803590602001918460018302840111640100000000831117156101f157600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f8201169050808301925050505050505091929192908035906020019064010000000081111561025457600080fd5b82018360208201111561026657600080fd5b8035906020019184600183028401116401000000008311171561028857600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f820116905080830192505050505050509192919290803590602001906401000000008111156102eb57600080fd5b8201836020820111156102fd57600080fd5b8035906020019184600183028401116401000000008311171561031f57600080fd5b91908080601f01602080910402602001604051908101604052809392919081
8152602001838380828437600081840152601f19601f8201169050808301925050505050505091929192908035906020019064010000000081111561038257600080fd5b82018360208201111561039457600080fd5b803590602001918460018302840111640100000000831117156103b657600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f8201169050808301925050505050505091929192905050506104c9565b005b610412610686565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b60606040518060400160405280601081526020017f526561645772697465436154466c616700000000000000000000000000000000815250905090565b600062726365828486181886888a168b8d1818181710156104b157600080fd5b6001905098975050505050505050565b600080905090565b671234567890abcdef60c01b77ffffffffffffffffffffffffffffffffffffffffffffffff191683805190602001208580519060200120161461050b57600080fd5b67abcdef123456789060c01b77ffffffffffffffffffffffffffffffffffffffffffffffff191682805190602001208480519060200120181461054d57600080fd5b67a1b2c3d4e5f6098760c01b77ffffffffffffffffffffffffffffffffffffffffffffffff191682805190602001208580519060200120171461058f57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166376fe1e92826040518263ffffffff1660e01b81526004018080602001828103825283818151815260200191508051906020019080838360005b8381101561061c578082015181840152602081019050610601565b50505050905090810190601f1680156106495780820380516001836020036101000a031916815260200191505b5092505050600060405180830381600087803b15801561066857600080fd5b505af115801561067c573d6000803e3d6000fd5b5050505050505050565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff168156fea265627a7a723158206302876a93a2f8c254feea0484f042ce2eeea0380fd5fca199b197d71e2e656e64736f6c634300050b0032"
# input_p1code = re.sub('a265627a7a72305820.{64}64736f6c634300050b0032', 'a265627a7a72305820'+randhex(64)+'64736f6c634300050b0032', input_p1code)
#input_p1code.repalce("627a7a723158206302876a93a2f8c254feea0484f042ce2eeea0380fd5fca199b197d71e2e656e64736f6c634300050b0032", "627a7a" + ''.join(random.sample(string.hexdigits, 10)).lower()*9 + "0032")
input_p2code = "000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000005a2608060405234801561001057600080fd5b50336000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550610542806100606000396000f3fe608060405234801561001057600080fd5b506004361061004c5760003560e01c806304f77cfa146100515780634059e88714610073578063fd922a42146101c5578063ffa644851461020f575b600080fd5b61005961028e565b604051808215151515815260200191505060405180910390f35b6101c36004803603604081101561008957600080fd5b81019080803590602001906401000000008111156100a657600080fd5b8201836020820111156100b857600080fd5b803590602001918460018302840111640100000000831117156100da57600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f8201169050808301925050505050505091929192908035906020019064010000000081111561013d57600080fd5b82018360208201111561014f57600080fd5b8035906020019184600183028401116401000000008311171561017157600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f820116905080830192505050505050509192919290505050610362565b005b6101cd61049e565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b61028c600480360361012081101561022657600080fd5b810190808035906020019092919080359060200190929190803590602001909291908035906020019092919080359060200190929190803590602001909291908035906020019092919080359060200190929190803590602001909291905050506104c3565b005b6000806009546008546007541818600654600554600454181860035460025460015418180101905060006009546006546003540101600854600554600254010160075460045460015401011818905063aabbccdd818301106102ef57600080fd5b708261e26b90505061031256e5afb60721cb821161030c57600080fd5b8082027ef35b6080614321368282376084810151606401816080016143855161051756101561033a57600080fd5b6f65e670d9
bd540cea22fdab97e36840e2818303101561035957600080fd5b60019250505090565b61036a61028e565b61037357600080fd5b716111d850336107ef16565b908018915a905660701b6dffffffffffffffffffffffffffff19168280519060200120141561049a576000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166376fe1e92826040518263ffffffff1660e01b81526004018080602001828103825283818151815260200191508051906020019080838360005b8381101561043557808201518184015260208101905061041a565b50505050905090810190601f1680156104625780820380516001836020036101000a031916815260200191505b5092505050600060405180830381600087803b15801561048157600080fd5b505af1158015610495573d6000803e3d6000fd5b505050505b5050565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b88600181905550876002819055508660038190555085600481905550846005819055508360068190555082600781905550816008819055508060098190555050505050505050505056fea265627a7a723158205b60025b8060051461049a57818160041b1c61ffff16919060010161051a564d64736f6c634300050b0032"
#000000000000000000000000000000000000000000000000000000000000
input_p2code = re.sub('a265627a7a723158205b60025b8060051461049a57818160041b1c61ffff16919060010161051a56.{2}64736f6c6343.{6}0032', 'a265627a7a723158205b60025b8060051461049a57818160041b1c61ffff16919060010161051a56'+randhex(2)+'64736f6c6343'+randhex(6)+'0032', input_p2code)
#input_p2code.repalce("627a7a723158205b60025b8060051461049a57818160041b1c61ffff16919060010161051a564d64736f6c634300050b0032", "627a7a" + ''.join(random.sample(string.hexdigits, 10)).lower()*9 + "0032")
#input_3_startGame = "0000000000000000000000000000000000000000000000000000000000000001"
#err, p1_deploy_txhash = IMontagy.register1(ctx, montage_address, input_p1code, _acct, acc_nonce)
#processor.update(5)
err, p2_deploy_txhash = IMontagy.register2(ctx, montage_address, input_p2code, _acct, acc_nonce)
processor.update(5)
#IMontagy.register1_review(ctx, _acct, p1_deploy_txhash)
#processor.update(5)
IMontagy.register2_review(ctx, _acct, p2_deploy_txhash)
processor.update(5)
# 3 stage New Puzzle
'''
err, newpuzzle1_txhash = IMontagy.newPuzzle1(ctx, montage_address, input_p1code, _acct, acc_nonce)
processor.update(5)
IMontagy.newPuzzle1_review(ctx, _acct, newpuzzle1_txhash)
processor.update(5)
p1_address = IMontagy.getPAddress(ctx, montage_address, acc_nonce)
processor.update(5)
'''
err, newpuzzle2_txhash = IMontagy.newPuzzle2(ctx, montage_address, input_p2code, _acct, acc_nonce)
processor.update(5)
IMontagy.newPuzzle2_review(ctx, _acct, newpuzzle2_txhash)
processor.update(5)
p2_address = IMontagy.getPAddress(ctx, montage_address, acc_nonce)
processor.update(5)
# finish deployer
return finish_deploy(montage_address)
| 107.311828
| 3,859
| 0.937074
| 362
| 9,980
| 25.616022
| 0.259669
| 0.022646
| 0.020705
| 0.017362
| 0.070312
| 0.056508
| 0.052842
| 0.051979
| 0.051979
| 0.051979
| 0
| 0.646822
| 0.028858
| 9,980
| 92
| 3,860
| 108.478261
| 0.309946
| 0.497595
| 0
| 0.194444
| 0
| 0
| 0.684704
| 0.680869
| 0
| 1
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.222222
| 0
| 0.333333
| 0.027778
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
9135cb0f414650f6760d861ff31550aad99e72bd
| 1,356
|
py
|
Python
|
test/__init__.py
|
macbre/now-playing-graph
|
2c211ef5544a6892e8e5fc2556ffcc6b7ca7ab84
|
[
"MIT"
] | null | null | null |
test/__init__.py
|
macbre/now-playing-graph
|
2c211ef5544a6892e8e5fc2556ffcc6b7ca7ab84
|
[
"MIT"
] | 3
|
2019-02-07T17:46:04.000Z
|
2019-02-15T14:27:09.000Z
|
test/__init__.py
|
macbre/now-playing-graph
|
2c211ef5544a6892e8e5fc2556ffcc6b7ca7ab84
|
[
"MIT"
] | null | null | null |
STREAM = """
data: {"updated":"2019-01-20T18:50:24.980","now":{"artist":"Eivør Pálsdóttir","title":"Elisabeth og Elinborg","start":"2019-01-20T18:50:23.541"},"next":{"artist":"Benjamin Rajani","title":"Sálmur 40","start":"2019-01-20T18:54:36.950"}}
data: {"updated":"2019-01-21T04:02:57.133","now":{"artist":"Enekk","title":"Ódn","start":"2019-01-21T04:02:55.506"},"next":{"artist":"Ragnar í vík","title":"You Broke Your Own Heart","start":"2019-01-21T04:07:24"}}
data: {"updated":"2019-01-23T02:42:21.638","now":{"artist":"Eivør Pálsdóttir","title":"Vársins ljóð","start":"2019-01-23T02:42:19.771"},"next":{"artist":"Jens John Jakobsen","title":"Undur sólar hita","start":"2019-01-23T02:46:54.400"}}
data: {"updated":"2019-01-26T06:23:55.616","now":{"artist":"Enekk","title":"Slatur","start":"2019-01-26T06:23:54.161"},"next":{"artist":"Taxi","title":"Meistarin","start":"2019-01-26T06:27:09.274"}}
data: {"updated":"2019-02-04T12:10:24.312","now":{"artist":"Eivør Pálsdóttir","title":"Mannabarn","start":"2019-02-04T12:10:22.916"},"next":{"artist":"The Dreams","title":"Verden vil bedrages","start":"2019-02-04T12:15:15.660"}}
data: {"updated":"2019-01-22T12:08:11.478","now":{"artist":"Orka","title":"Hon leitar","start":"2019-01-22T12:08:10.052"},"next":{"artist":"Holgar","title":"Veitslan","start":"2019-01-22T12:10:22.780"}}
""".strip().split("\n")
| 150.666667
| 236
| 0.655605
| 217
| 1,356
| 4.096774
| 0.447005
| 0.101237
| 0.123735
| 0.095613
| 0.097863
| 0
| 0
| 0
| 0
| 0
| 0
| 0.231411
| 0.028024
| 1,356
| 8
| 237
| 169.5
| 0.443096
| 0
| 0
| 0
| 0
| 0.75
| 0.974926
| 0.893068
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
913a5bd8cf30da74b60e565c73d5967e37cb8622
| 10,123
|
py
|
Python
|
trainers.py
|
popescuaaa/sequences-classification
|
c88a6f53e1a7f65c7ba758199b8b19ac25e8fe28
|
[
"MIT"
] | 1
|
2022-03-12T21:31:05.000Z
|
2022-03-12T21:31:05.000Z
|
trainers.py
|
popescuaaa/sequences-classification
|
c88a6f53e1a7f65c7ba758199b8b19ac25e8fe28
|
[
"MIT"
] | null | null | null |
trainers.py
|
popescuaaa/sequences-classification
|
c88a6f53e1a7f65c7ba758199b8b19ac25e8fe28
|
[
"MIT"
] | null | null | null |
import torch
from typing import Dict, Tuple, List
from models import RNNClassifier, LSTMClassifier, GRUClassifier, CNNClassifier, TCNNClassifier
import torch.nn as nn
from data import Pendigits
from torch.utils.data import DataLoader
from torch.optim import Adam
from torch import Tensor
from evaluation_metrics import cm
def compute_validation_loss(model: nn.Module, cfg: Dict, criterion: nn.CrossEntropyLoss) -> Tensor:
device = torch.device(cfg['system']['device'])
model.eval()
val_ds = Pendigits.Pendigits(file_path='./data/pendigits.tes')
data, labels = val_ds.get_all()
data = torch.from_numpy(data)
labels = torch.from_numpy(labels)
bs, seq_len, features = data.shape
data = data.view(bs, features, seq_len)
data = data.float()
data = data.to(device)
labels = labels.to(device)
out = model(data)
loss = criterion(out, labels)
return loss
def RNNClassifierTrainer(cfg: Dict) -> Tuple[List, List, List]:
device = torch.device(cfg['system']['device'])
lr = float(cfg['system']['lr'])
num_epochs = int(cfg['rnn']['num_epochs'])
batch_size = int(cfg['system']['batch_size'])
model = RNNClassifier.RNNClassifier(cfg)
model = model.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = Adam(model.parameters(), lr=lr)
ds = Pendigits.Pendigits(file_path='./data/pendigits.tra')
dl = DataLoader(ds, num_workers=2, batch_size=batch_size)
train_loss = []
validation_loss = []
cms = []
for epoch in range(1, num_epochs + 1):
for idx, e in enumerate(dl):
model.train()
digits, labels = e
bs, seq_len, features = digits.shape
digits = digits.view(bs, features, seq_len)
digits = digits.float()
digits = digits.to(device)
labels = labels.to(device)
out = model(digits)
loss = criterion(out, labels)
train_loss.append(loss.cpu().data.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
val_loss = compute_validation_loss(model=model, cfg=cfg, criterion=criterion).cpu().data.item()
validation_loss.append(val_loss)
if idx == len(dl) - 1:
print('[RNN][{}/{}] Training loss: {:.6f} | Validation loss: {:.6f}'.format(
epoch,
num_epochs,
loss.cpu().data.item(),
val_loss))
# Step evaluation with confusion matrix and accuracy score
cms.append(cm(model=model, cfg=cfg))
# Save model
torch.save(model.state_dict(), './trained_models/rnn.pt')
return train_loss, validation_loss, cms
def LSTMClassifierTrainer(cfg: Dict) -> Tuple[List, List, List]:
device = torch.device(cfg['system']['device'])
lr = float(cfg['system']['lr'])
num_epochs = int(cfg['lstm']['num_epochs'])
batch_size = int(cfg['system']['batch_size'])
model = LSTMClassifier.LSTMClassifier(cfg=cfg)
model = model.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = Adam(model.parameters(), lr=lr)
ds = Pendigits.Pendigits('./data/pendigits.tra')
dl = DataLoader(ds, num_workers=2, batch_size=batch_size)
train_loss = []
validation_loss = []
cms = []
for epoch in range(1, num_epochs + 1):
for idx, e in enumerate(dl):
model.train()
digits, labels = e
bs, seq_len, features = digits.shape
digits = digits.view(bs, features, seq_len)
digits = digits.float()
digits = digits.to(device)
labels = labels.to(device)
out = model(digits)
loss = criterion(out, labels)
train_loss.append(loss.cpu().data.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
val_loss = compute_validation_loss(model=model, cfg=cfg, criterion=criterion).cpu().data.item()
validation_loss.append(val_loss)
if idx == len(dl) - 1:
print('[RNN][{}/{}] Training loss: {:.6f} | Validation loss: {:.6f}'.format(
epoch,
num_epochs,
loss.cpu().data.item(),
val_loss))
# Step evaluation with confusion matrix and accuracy score
cms.append(cm(model=model, cfg=cfg))
# Save model
torch.save(model.state_dict(), './trained_models/lstm.pt')
return train_loss, validation_loss, cms
def GRUClassifierTrainer(cfg: Dict) -> Tuple[List, List, List]:
device = torch.device(cfg['system']['device'])
lr = float(cfg['system']['lr'])
num_epochs = int(cfg['gru']['num_epochs'])
batch_size = int(cfg['system']['batch_size'])
model = GRUClassifier.GRUClassifier(cfg=cfg)
model = model.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = Adam(model.parameters(), lr=lr)
ds = Pendigits.Pendigits('./data/pendigits.tra')
dl = DataLoader(ds, num_workers=2, batch_size=batch_size)
train_loss = []
validation_loss = []
cms = []
for epoch in range(1, num_epochs + 1):
for idx, e in enumerate(dl):
model.train()
digits, labels = e
bs, seq_len, features = digits.shape
digits = digits.view(bs, features, seq_len)
digits = digits.float()
digits = digits.to(device)
labels = labels.to(device)
out = model(digits)
loss = criterion(out, labels)
train_loss.append(loss.cpu().data.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
val_loss = compute_validation_loss(model=model, cfg=cfg, criterion=criterion).cpu().data.item()
validation_loss.append(val_loss)
if idx == len(dl) - 1:
print('[RNN][{}/{}] Training loss: {:.6f} | Validation loss: {:.6f}'.format(
epoch,
num_epochs,
loss.cpu().data.item(),
val_loss))
# Step evaluation with confusion matrix and accuracy score
cms.append(cm(model=model, cfg=cfg))
# Save model
torch.save(model.state_dict(), './trained_models/gru.pt')
return train_loss, validation_loss, cms
def CNNClassifierTrainer(cfg: Dict) -> Tuple[List, List, List]:
device = torch.device(cfg['system']['device'])
lr = float(cfg['system']['lr'])
num_epochs = int(cfg['cnn']['num_epochs'])
batch_size = int(cfg['system']['batch_size'])
model = CNNClassifier.CNNClassifier(cfg=cfg)
model = model.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = Adam(model.parameters(), lr=lr)
ds = Pendigits.Pendigits('./data/pendigits.tra')
dl = DataLoader(ds, num_workers=2, batch_size=batch_size)
train_loss = []
validation_loss = []
cms = []
for epoch in range(1, num_epochs + 1):
for idx, e in enumerate(dl):
model.train()
digits, labels = e
bs, seq_len, features = digits.shape
digits = digits.view(bs, features, seq_len)
digits = digits.float()
digits = digits.to(device)
labels = labels.to(device)
out = model(digits)
loss = criterion(out, labels)
train_loss.append(loss.cpu().data.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
val_loss = compute_validation_loss(model=model, cfg=cfg, criterion=criterion).cpu().data.item()
validation_loss.append(val_loss)
if idx == len(dl) - 1:
print('[CNN][{}/{}] Training loss: {:.6f} | Validation loss: {:.6f}'.format(
epoch,
num_epochs,
loss.cpu().data.item(),
val_loss))
# Step evaluation with confusion matrix and accuracy score
cms.append(cm(model=model, cfg=cfg))
# Save model
torch.save(model.state_dict(), './trained_models/cnn.pt')
return train_loss, validation_loss, cms
def TCNClassifierTrainer(cfg: Dict) -> Tuple[List, List, List]:
device = torch.device(cfg['system']['device'])
lr = float(cfg['system']['lr'])
num_epochs = int(cfg['tcn']['num_epochs'])
batch_size = int(cfg['system']['batch_size'])
model = TCNNClassifier.TCNClassifier(cfg=cfg)
model = model.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = Adam(model.parameters(), lr=lr)
ds = Pendigits.Pendigits('./data/pendigits.tra')
dl = DataLoader(ds, num_workers=2, batch_size=batch_size)
train_loss = []
validation_loss = []
cms = []
for epoch in range(1, num_epochs + 1):
for idx, e in enumerate(dl):
model.train()
digits, labels = e
bs, seq_len, features = digits.shape
digits = digits.view(bs, features, seq_len)
digits = digits.float()
digits = digits.to(device)
labels = labels.to(device)
out = model(digits)
loss = criterion(out, labels)
train_loss.append(loss.cpu().data.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
val_loss = compute_validation_loss(model=model, cfg=cfg, criterion=criterion).cpu().data.item()
validation_loss.append(val_loss)
if idx == len(dl) - 1:
print('[TCN][{}/{}] Training loss: {:.6f} | Validation loss: {:.6f}'.format(
epoch,
num_epochs,
loss.cpu().data.item(),
val_loss))
# Step evaluation with confusion matrix and accuracy score
cms.append(cm(model=model, cfg=cfg))
# Save model
torch.save(model.state_dict(), './trained_models/tcn.pt')
return train_loss, validation_loss, cms
| 32.238854
| 107
| 0.580263
| 1,175
| 10,123
| 4.882553
| 0.095319
| 0.063448
| 0.028761
| 0.040091
| 0.861077
| 0.857591
| 0.852013
| 0.837546
| 0.805473
| 0.805473
| 0
| 0.004159
| 0.287365
| 10,123
| 313
| 108
| 32.341853
| 0.791101
| 0.033488
| 0
| 0.803571
| 0
| 0
| 0.081244
| 0.011869
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026786
| false
| 0
| 0.040179
| 0
| 0.09375
| 0.022321
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e67fff5fe29e77eefe239dd7377b7221a00e7468
| 54,681
|
py
|
Python
|
services/healthchecks/src/oci_cli_health_checks/generated/healthchecks_cli.py
|
andrewtvuong/oci-cli
|
7673a808613308a4899c7026964fa2383c30c397
|
[
"Apache-2.0"
] | null | null | null |
services/healthchecks/src/oci_cli_health_checks/generated/healthchecks_cli.py
|
andrewtvuong/oci-cli
|
7673a808613308a4899c7026964fa2383c30c397
|
[
"Apache-2.0"
] | null | null | null |
services/healthchecks/src/oci_cli_health_checks/generated/healthchecks_cli.py
|
andrewtvuong/oci-cli
|
7673a808613308a4899c7026964fa2383c30c397
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
from __future__ import print_function
import click
import oci # noqa: F401
import six # noqa: F401
import sys # noqa: F401
from oci_cli.cli_root import cli
from oci_cli import cli_constants # noqa: F401
from oci_cli import cli_util
from oci_cli import json_skeleton_utils
from oci_cli import custom_types # noqa: F401
from oci_cli.aliasing import CommandGroupWithAlias
@cli.command(cli_util.override('healthchecks_root_group.command_name', 'healthchecks'), cls=CommandGroupWithAlias, help=cli_util.override('healthchecks_root_group.help', """API for the Health Checks service. Use this API to manage endpoint probes and monitors.
For more information, see
[Overview of the Health Checks Service](/iaas/Content/HealthChecks/Concepts/healthchecks.htm).
"""), short_help=cli_util.override('healthchecks_root_group.short_help', """Health Checks API"""))
@cli_util.help_option_group
def healthchecks_root_group():
pass
@click.command(cli_util.override('ping_monitor_group.command_name', 'ping-monitor'), cls=CommandGroupWithAlias, help="""A summary containing all of the mutable and immutable properties for a ping monitor.""")
@cli_util.help_option_group
def ping_monitor_group():
pass
@click.command(cli_util.override('ping_probe_group.command_name', 'ping-probe'), cls=CommandGroupWithAlias, help="""This model contains all of the mutable and immutable properties for a ping probe.""")
@cli_util.help_option_group
def ping_probe_group():
pass
@click.command(cli_util.override('health_checks_vantage_point_group.command_name', 'health-checks-vantage-point'), cls=CommandGroupWithAlias, help="""Information about a vantage point.""")
@cli_util.help_option_group
def health_checks_vantage_point_group():
pass
@click.command(cli_util.override('http_monitor_group.command_name', 'http-monitor'), cls=CommandGroupWithAlias, help="""This model contains all of the mutable and immutable properties for an HTTP monitor.""")
@cli_util.help_option_group
def http_monitor_group():
pass
@click.command(cli_util.override('http_probe_result_group.command_name', 'http-probe-result'), cls=CommandGroupWithAlias, help="""The results returned by running an HTTP probe. All times and durations are returned in milliseconds. All times are relative to the POSIX epoch (1970-01-01T00:00Z). Time properties conform to W3C Resource Timing. For more information, see [PerformanceResourceTiming] interface.""")
@cli_util.help_option_group
def http_probe_result_group():
    # Subgroup for http-probe-result subcommands (list).
    pass
@click.command(cli_util.override('ping_probe_result_group.command_name', 'ping-probe-result'), cls=CommandGroupWithAlias, help="""The results returned by running a ping probe. All times and durations are returned in milliseconds. All times are relative to the POSIX epoch (1970-01-01T00:00Z).""")
@cli_util.help_option_group
def ping_probe_result_group():
    # Subgroup for ping-probe-result subcommands.
    pass
@click.command(cli_util.override('http_probe_group.command_name', 'http-probe'), cls=CommandGroupWithAlias, help="""A summary that contains all of the mutable and immutable properties for an HTTP probe.""")
@cli_util.help_option_group
def http_probe_group():
    # Subgroup for http-probe subcommands (e.g. create-on-demand).
    pass
# Register every resource subgroup on the health-checks root group.
for _subgroup in (
    ping_monitor_group,
    ping_probe_group,
    health_checks_vantage_point_group,
    http_monitor_group,
    http_probe_result_group,
    ping_probe_result_group,
    http_probe_group,
):
    healthchecks_root_group.add_command(_subgroup)
del _subgroup
@http_monitor_group.command(name=cli_util.override('create_http_monitor.command_name', 'create'), help=u"""Creates an HTTP monitor. Vantage points will be automatically selected if not specified, and probes will be initiated from each vantage point to each of the targets at the frequency specified by `intervalInSeconds`.""")
@cli_util.option('--compartment-id', required=True, help=u"""The OCID of the compartment.""")
@cli_util.option('--targets', required=True, type=custom_types.CLI_COMPLEX_TYPE, help=u"""""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--protocol', required=True, type=custom_types.CliCaseInsensitiveChoice(["HTTP", "HTTPS"]), help=u"""""")
@cli_util.option('--display-name', required=True, help=u"""A user-friendly and mutable name suitable for display in a user interface.""")
@cli_util.option('--interval-in-seconds', required=True, type=click.INT, help=u"""The monitor interval in seconds. Valid values: 10, 30, and 60.""")
@cli_util.option('--vantage-point-names', type=custom_types.CLI_COMPLEX_TYPE, help=u"""""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--port', type=click.INT, help=u"""The port on which to probe endpoints. If unspecified, probes will use the default port of their protocol.""")
@cli_util.option('--timeout-in-seconds', type=click.INT, help=u"""The probe timeout in seconds. Valid values: 10, 20, 30, and 60. The probe timeout must be less than or equal to `intervalInSeconds` for monitors.""")
@cli_util.option('--method', type=custom_types.CliCaseInsensitiveChoice(["GET", "HEAD"]), help=u"""""")
@cli_util.option('--path', help=u"""The optional URL path to probe, including query parameters.""")
@cli_util.option('--headers', type=custom_types.CLI_COMPLEX_TYPE, help=u"""A dictionary of HTTP request headers.
*Note:* Monitors and probes do not support the use of the `Authorization` HTTP header.""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--is-enabled', type=click.BOOL, help=u"""Enables or disables the monitor. Set to 'true' to launch monitoring.""")
@cli_util.option('--freeform-tags', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags]. Example: `{\"Department\": \"Finance\"}`""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--defined-tags', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags]. Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@json_skeleton_utils.get_cli_json_input_option({'targets': {'module': 'healthchecks', 'class': 'list[string]'}, 'vantage-point-names': {'module': 'healthchecks', 'class': 'list[string]'}, 'headers': {'module': 'healthchecks', 'class': 'dict(str, string)'}, 'freeform-tags': {'module': 'healthchecks', 'class': 'dict(str, string)'}, 'defined-tags': {'module': 'healthchecks', 'class': 'dict(str, dict(str, object))'}})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'targets': {'module': 'healthchecks', 'class': 'list[string]'}, 'vantage-point-names': {'module': 'healthchecks', 'class': 'list[string]'}, 'headers': {'module': 'healthchecks', 'class': 'dict(str, string)'}, 'freeform-tags': {'module': 'healthchecks', 'class': 'dict(str, string)'}, 'defined-tags': {'module': 'healthchecks', 'class': 'dict(str, dict(str, object))'}}, output_type={'module': 'healthchecks', 'class': 'HttpMonitor'})
@cli_util.wrap_exceptions
def create_http_monitor(ctx, from_json, compartment_id, targets, protocol, display_name, interval_in_seconds, vantage_point_names, port, timeout_in_seconds, method, path, headers, is_enabled, freeform_tags, defined_tags):
    """Build the CreateHttpMonitorDetails payload from the CLI options, call the service, and render the response."""
    kwargs = {'opc_request_id': cli_util.use_or_generate_request_id(ctx.obj['request_id'])}

    # Required payload properties.
    details = {
        'compartmentId': compartment_id,
        'targets': cli_util.parse_json_parameter("targets", targets),
        'protocol': protocol,
        'displayName': display_name,
        'intervalInSeconds': interval_in_seconds,
    }

    # Optional properties: (payload key, CLI value, JSON parameter name or None for scalar values).
    # Order mirrors the option declarations above so payload key insertion order is unchanged.
    optional_fields = (
        ('vantagePointNames', vantage_point_names, 'vantage_point_names'),
        ('port', port, None),
        ('timeoutInSeconds', timeout_in_seconds, None),
        ('method', method, None),
        ('path', path, None),
        ('headers', headers, 'headers'),
        ('isEnabled', is_enabled, None),
        ('freeformTags', freeform_tags, 'freeform_tags'),
        ('definedTags', defined_tags, 'defined_tags'),
    )
    for key, value, json_name in optional_fields:
        if value is None:
            continue
        details[key] = cli_util.parse_json_parameter(json_name, value) if json_name else value

    client = cli_util.build_client('health_checks', ctx)
    result = client.create_http_monitor(create_http_monitor_details=details, **kwargs)
    cli_util.render_response(result, ctx)
@http_probe_group.command(name=cli_util.override('create_on_demand_http_probe.command_name', 'create-on-demand'), help=u"""Creates an on-demand HTTP probe. The location response header contains the URL for fetching the probe results.
*Note:* On-demand probe configurations are not saved.""")
@cli_util.option('--compartment-id', required=True, help=u"""The OCID of the compartment.""")
@cli_util.option('--targets', required=True, type=custom_types.CLI_COMPLEX_TYPE, help=u"""""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--protocol', required=True, type=custom_types.CliCaseInsensitiveChoice(["HTTP", "HTTPS"]), help=u"""""")
@cli_util.option('--vantage-point-names', type=custom_types.CLI_COMPLEX_TYPE, help=u"""""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--port', type=click.INT, help=u"""The port on which to probe endpoints. If unspecified, probes will use the default port of their protocol.""")
@cli_util.option('--timeout-in-seconds', type=click.INT, help=u"""The probe timeout in seconds. Valid values: 10, 20, 30, and 60. The probe timeout must be less than or equal to `intervalInSeconds` for monitors.""")
@cli_util.option('--method', type=custom_types.CliCaseInsensitiveChoice(["GET", "HEAD"]), help=u"""""")
@cli_util.option('--path', help=u"""The optional URL path to probe, including query parameters.""")
@cli_util.option('--headers', type=custom_types.CLI_COMPLEX_TYPE, help=u"""A dictionary of HTTP request headers.
*Note:* Monitors and probes do not support the use of the `Authorization` HTTP header.""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@json_skeleton_utils.get_cli_json_input_option({'targets': {'module': 'healthchecks', 'class': 'list[string]'}, 'vantage-point-names': {'module': 'healthchecks', 'class': 'list[string]'}, 'headers': {'module': 'healthchecks', 'class': 'dict(str, string)'}})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'targets': {'module': 'healthchecks', 'class': 'list[string]'}, 'vantage-point-names': {'module': 'healthchecks', 'class': 'list[string]'}, 'headers': {'module': 'healthchecks', 'class': 'dict(str, string)'}}, output_type={'module': 'healthchecks', 'class': 'HttpProbe'})
@cli_util.wrap_exceptions
def create_on_demand_http_probe(ctx, from_json, compartment_id, targets, protocol, vantage_point_names, port, timeout_in_seconds, method, path, headers):
    """Build the on-demand HTTP probe payload from the CLI options, call the service, and render the response."""
    kwargs = {'opc_request_id': cli_util.use_or_generate_request_id(ctx.obj['request_id'])}

    # Required payload properties.
    details = {
        'compartmentId': compartment_id,
        'targets': cli_util.parse_json_parameter("targets", targets),
        'protocol': protocol,
    }

    # Optional properties: (payload key, CLI value, JSON parameter name or None for scalar values).
    optional_fields = (
        ('vantagePointNames', vantage_point_names, 'vantage_point_names'),
        ('port', port, None),
        ('timeoutInSeconds', timeout_in_seconds, None),
        ('method', method, None),
        ('path', path, None),
        ('headers', headers, 'headers'),
    )
    for key, value, json_name in optional_fields:
        if value is None:
            continue
        details[key] = cli_util.parse_json_parameter(json_name, value) if json_name else value

    client = cli_util.build_client('health_checks', ctx)
    result = client.create_on_demand_http_probe(create_on_demand_http_probe_details=details, **kwargs)
    cli_util.render_response(result, ctx)
@ping_probe_group.command(name=cli_util.override('create_on_demand_ping_probe.command_name', 'create-on-demand'), help=u"""Creates an on-demand ping probe. The location response header contains the URL for fetching probe results.
*Note:* The on-demand probe configuration is not saved.""")
@cli_util.option('--compartment-id', required=True, help=u"""The OCID of the compartment.""")
@cli_util.option('--targets', required=True, type=custom_types.CLI_COMPLEX_TYPE, help=u"""""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--protocol', required=True, type=custom_types.CliCaseInsensitiveChoice(["ICMP", "TCP"]), help=u"""""")
@cli_util.option('--vantage-point-names', type=custom_types.CLI_COMPLEX_TYPE, help=u"""""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--port', type=click.INT, help=u"""The port on which to probe endpoints. If unspecified, probes will use the default port of their protocol.""")
@cli_util.option('--timeout-in-seconds', type=click.INT, help=u"""The probe timeout in seconds. Valid values: 10, 20, 30, and 60. The probe timeout must be less than or equal to `intervalInSeconds` for monitors.""")
@json_skeleton_utils.get_cli_json_input_option({'targets': {'module': 'healthchecks', 'class': 'list[string]'}, 'vantage-point-names': {'module': 'healthchecks', 'class': 'list[string]'}})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'targets': {'module': 'healthchecks', 'class': 'list[string]'}, 'vantage-point-names': {'module': 'healthchecks', 'class': 'list[string]'}}, output_type={'module': 'healthchecks', 'class': 'PingProbe'})
@cli_util.wrap_exceptions
def create_on_demand_ping_probe(ctx, from_json, compartment_id, targets, protocol, vantage_point_names, port, timeout_in_seconds):
    """Build the on-demand ping probe payload from the CLI options, call the service, and render the response."""
    kwargs = {'opc_request_id': cli_util.use_or_generate_request_id(ctx.obj['request_id'])}

    # Required payload properties.
    details = {
        'compartmentId': compartment_id,
        'targets': cli_util.parse_json_parameter("targets", targets),
        'protocol': protocol,
    }

    # Optional properties are added only when the caller supplied them.
    if vantage_point_names is not None:
        details['vantagePointNames'] = cli_util.parse_json_parameter("vantage_point_names", vantage_point_names)
    for key, value in (('port', port), ('timeoutInSeconds', timeout_in_seconds)):
        if value is not None:
            details[key] = value

    client = cli_util.build_client('health_checks', ctx)
    result = client.create_on_demand_ping_probe(create_on_demand_ping_probe_details=details, **kwargs)
    cli_util.render_response(result, ctx)
@ping_monitor_group.command(name=cli_util.override('create_ping_monitor.command_name', 'create'), help=u"""Creates a ping monitor. Vantage points will be automatically selected if not specified, and probes will be initiated from each vantage point to each of the targets at the frequency specified by `intervalInSeconds`.""")
@cli_util.option('--compartment-id', required=True, help=u"""The OCID of the compartment.""")
@cli_util.option('--targets', required=True, type=custom_types.CLI_COMPLEX_TYPE, help=u"""""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--protocol', required=True, type=custom_types.CliCaseInsensitiveChoice(["ICMP", "TCP"]), help=u"""""")
@cli_util.option('--display-name', required=True, help=u"""A user-friendly and mutable name suitable for display in a user interface.""")
@cli_util.option('--interval-in-seconds', required=True, type=click.INT, help=u"""The monitor interval in seconds. Valid values: 10, 30, and 60.""")
@cli_util.option('--vantage-point-names', type=custom_types.CLI_COMPLEX_TYPE, help=u"""""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--port', type=click.INT, help=u"""The port on which to probe endpoints. If unspecified, probes will use the default port of their protocol.""")
@cli_util.option('--timeout-in-seconds', type=click.INT, help=u"""The probe timeout in seconds. Valid values: 10, 20, 30, and 60. The probe timeout must be less than or equal to `intervalInSeconds` for monitors.""")
@cli_util.option('--is-enabled', type=click.BOOL, help=u"""Enables or disables the monitor. Set to 'true' to launch monitoring.""")
@cli_util.option('--freeform-tags', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags]. Example: `{\"Department\": \"Finance\"}`""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--defined-tags', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags]. Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@json_skeleton_utils.get_cli_json_input_option({'targets': {'module': 'healthchecks', 'class': 'list[string]'}, 'vantage-point-names': {'module': 'healthchecks', 'class': 'list[string]'}, 'freeform-tags': {'module': 'healthchecks', 'class': 'dict(str, string)'}, 'defined-tags': {'module': 'healthchecks', 'class': 'dict(str, dict(str, object))'}})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'targets': {'module': 'healthchecks', 'class': 'list[string]'}, 'vantage-point-names': {'module': 'healthchecks', 'class': 'list[string]'}, 'freeform-tags': {'module': 'healthchecks', 'class': 'dict(str, string)'}, 'defined-tags': {'module': 'healthchecks', 'class': 'dict(str, dict(str, object))'}}, output_type={'module': 'healthchecks', 'class': 'PingMonitor'})
@cli_util.wrap_exceptions
def create_ping_monitor(ctx, from_json, compartment_id, targets, protocol, display_name, interval_in_seconds, vantage_point_names, port, timeout_in_seconds, is_enabled, freeform_tags, defined_tags):
    """Build the CreatePingMonitorDetails payload from the CLI options, call the service, and render the response."""
    kwargs = {'opc_request_id': cli_util.use_or_generate_request_id(ctx.obj['request_id'])}

    # Required payload properties.
    details = {
        'compartmentId': compartment_id,
        'targets': cli_util.parse_json_parameter("targets", targets),
        'protocol': protocol,
        'displayName': display_name,
        'intervalInSeconds': interval_in_seconds,
    }

    # Optional properties: (payload key, CLI value, JSON parameter name or None for scalar values).
    # Order mirrors the option declarations above so payload key insertion order is unchanged.
    optional_fields = (
        ('vantagePointNames', vantage_point_names, 'vantage_point_names'),
        ('port', port, None),
        ('timeoutInSeconds', timeout_in_seconds, None),
        ('isEnabled', is_enabled, None),
        ('freeformTags', freeform_tags, 'freeform_tags'),
        ('definedTags', defined_tags, 'defined_tags'),
    )
    for key, value, json_name in optional_fields:
        if value is None:
            continue
        details[key] = cli_util.parse_json_parameter(json_name, value) if json_name else value

    client = cli_util.build_client('health_checks', ctx)
    result = client.create_ping_monitor(create_ping_monitor_details=details, **kwargs)
    cli_util.render_response(result, ctx)
@http_monitor_group.command(name=cli_util.override('delete_http_monitor.command_name', 'delete'), help=u"""Deletes the HTTP monitor and its configuration. All future probes of this monitor are stopped. Results associated with the monitor are not deleted.""")
@cli_util.option('--monitor-id', required=True, help=u"""The OCID of a monitor.""")
@cli_util.option('--if-match', help=u"""For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the etag from a previous GET or POST response for that resource. The resource will be updated or deleted only if the etag you provide matches the resource's current etag value.""")
@cli_util.confirm_delete_option
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={})
@cli_util.wrap_exceptions
def delete_http_monitor(ctx, from_json, monitor_id, if_match):
    """Delete the HTTP monitor identified by --monitor-id, honoring an optional --if-match etag."""
    # Reject blank identifiers locally instead of sending a doomed request.
    if isinstance(monitor_id, six.string_types) and not monitor_id.strip():
        raise click.UsageError('Parameter --monitor-id cannot be whitespace or empty string')

    kwargs = {}
    if if_match is not None:
        kwargs['if_match'] = if_match
    kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])

    client = cli_util.build_client('health_checks', ctx)
    result = client.delete_http_monitor(monitor_id=monitor_id, **kwargs)
    cli_util.render_response(result, ctx)
@ping_monitor_group.command(name=cli_util.override('delete_ping_monitor.command_name', 'delete'), help=u"""Deletes the ping monitor and its configuration. All future probes of this monitor are stopped. Results associated with the monitor are not deleted.""")
@cli_util.option('--monitor-id', required=True, help=u"""The OCID of a monitor.""")
@cli_util.option('--if-match', help=u"""For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the etag from a previous GET or POST response for that resource. The resource will be updated or deleted only if the etag you provide matches the resource's current etag value.""")
@cli_util.confirm_delete_option
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={})
@cli_util.wrap_exceptions
def delete_ping_monitor(ctx, from_json, monitor_id, if_match):
    """Delete the ping monitor identified by --monitor-id, honoring an optional --if-match etag."""
    # Reject blank identifiers locally instead of sending a doomed request.
    if isinstance(monitor_id, six.string_types) and not monitor_id.strip():
        raise click.UsageError('Parameter --monitor-id cannot be whitespace or empty string')

    kwargs = {}
    if if_match is not None:
        kwargs['if_match'] = if_match
    kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])

    client = cli_util.build_client('health_checks', ctx)
    result = client.delete_ping_monitor(monitor_id=monitor_id, **kwargs)
    cli_util.render_response(result, ctx)
@http_monitor_group.command(name=cli_util.override('get_http_monitor.command_name', 'get'), help=u"""Gets the configuration for the specified monitor.""")
@cli_util.option('--monitor-id', required=True, help=u"""The OCID of a monitor.""")
@cli_util.option('--if-none-match', help=u"""The `If-None-Match` header field makes the request method conditional on the absence of any current representation of the target resource, when the field-value is `*`, or having a selected representation with an entity-tag that does not match any of those listed in the field-value.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={}, output_type={'module': 'healthchecks', 'class': 'HttpMonitor'})
@cli_util.wrap_exceptions
def get_http_monitor(ctx, from_json, monitor_id, if_none_match):
    """Fetch and render the configuration of the HTTP monitor identified by --monitor-id."""
    # Reject blank identifiers locally instead of sending a doomed request.
    if isinstance(monitor_id, six.string_types) and not monitor_id.strip():
        raise click.UsageError('Parameter --monitor-id cannot be whitespace or empty string')

    kwargs = {}
    if if_none_match is not None:
        kwargs['if_none_match'] = if_none_match
    kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])

    client = cli_util.build_client('health_checks', ctx)
    result = client.get_http_monitor(monitor_id=monitor_id, **kwargs)
    cli_util.render_response(result, ctx)
@ping_monitor_group.command(name=cli_util.override('get_ping_monitor.command_name', 'get'), help=u"""Gets the configuration for the specified ping monitor.""")
@cli_util.option('--monitor-id', required=True, help=u"""The OCID of a monitor.""")
@cli_util.option('--if-none-match', help=u"""The `If-None-Match` header field makes the request method conditional on the absence of any current representation of the target resource, when the field-value is `*`, or having a selected representation with an entity-tag that does not match any of those listed in the field-value.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={}, output_type={'module': 'healthchecks', 'class': 'PingMonitor'})
@cli_util.wrap_exceptions
def get_ping_monitor(ctx, from_json, monitor_id, if_none_match):
    """Fetch and render the configuration of the ping monitor identified by --monitor-id."""
    # Reject blank identifiers locally instead of sending a doomed request.
    if isinstance(monitor_id, six.string_types) and not monitor_id.strip():
        raise click.UsageError('Parameter --monitor-id cannot be whitespace or empty string')

    kwargs = {}
    if if_none_match is not None:
        kwargs['if_none_match'] = if_none_match
    kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])

    client = cli_util.build_client('health_checks', ctx)
    result = client.get_ping_monitor(monitor_id=monitor_id, **kwargs)
    cli_util.render_response(result, ctx)
@health_checks_vantage_point_group.command(name=cli_util.override('list_health_checks_vantage_points.command_name', 'list'), help=u"""Gets information about all vantage points available to the user.""")
@cli_util.option('--limit', type=click.INT, help=u"""The maximum number of items to return in a paginated \"List\" call.""")
@cli_util.option('--page', help=u"""The value of the `opc-next-page` response header from the previous \"List\" call.""")
@cli_util.option('--sort-by', type=custom_types.CliCaseInsensitiveChoice(["name", "displayName"]), help=u"""The field to sort by when listing vantage points.""")
@cli_util.option('--sort-order', type=custom_types.CliCaseInsensitiveChoice(["ASC", "DESC"]), help=u"""Controls the sort order of results.""")
@cli_util.option('--name', help=u"""Filters results that exactly match the `name` field.""")
@cli_util.option('--display-name', help=u"""Filters results that exactly match the `displayName` field.""")
@cli_util.option('--all', 'all_pages', is_flag=True, help="""Fetches all pages of results. If you provide this option, then you cannot provide the --limit option.""")
@cli_util.option('--page-size', type=click.INT, help="""When fetching results, the number of results to fetch per call. Only valid when used with --all or --limit, and ignored otherwise.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={}, output_type={'module': 'healthchecks', 'class': 'list[HealthChecksVantagePointSummary]'})
@cli_util.wrap_exceptions
def list_health_checks_vantage_points(ctx, from_json, all_pages, page_size, limit, page, sort_by, sort_order, name, display_name):
    """List vantage points, either one page, up to --limit results, or every page with --all."""
    if all_pages and limit:
        raise click.UsageError('If you provide the --all option you cannot provide the --limit option')

    # Forward only the options the caller actually supplied.
    kwargs = {}
    for param, value in (
        ('limit', limit),
        ('page', page),
        ('sort_by', sort_by),
        ('sort_order', sort_order),
        ('name', name),
        ('display_name', display_name),
    ):
        if value is not None:
            kwargs[param] = value
    kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])

    client = cli_util.build_client('health_checks', ctx)
    if all_pages:
        # --page-size overrides the per-call limit while auto-paginating.
        if page_size:
            kwargs['limit'] = page_size
        result = cli_util.list_call_get_all_results(client.list_health_checks_vantage_points, **kwargs)
    elif limit is not None:
        result = cli_util.list_call_get_up_to_limit(client.list_health_checks_vantage_points, limit, page_size, **kwargs)
    else:
        result = client.list_health_checks_vantage_points(**kwargs)
    cli_util.render_response(result, ctx)
@http_monitor_group.command(name=cli_util.override('list_http_monitors.command_name', 'list'), help=u"""Gets a list of HTTP monitors.""")
@cli_util.option('--compartment-id', required=True, help=u"""Filters results by compartment.""")
@cli_util.option('--limit', type=click.INT, help=u"""The maximum number of items to return in a paginated \"List\" call.""")
@cli_util.option('--page', help=u"""The value of the `opc-next-page` response header from the previous \"List\" call.""")
@cli_util.option('--sort-by', type=custom_types.CliCaseInsensitiveChoice(["id", "displayName"]), help=u"""The field to sort by when listing monitors.""")
@cli_util.option('--sort-order', type=custom_types.CliCaseInsensitiveChoice(["ASC", "DESC"]), help=u"""Controls the sort order of results.""")
@cli_util.option('--display-name', help=u"""Filters results that exactly match the `displayName` field.""")
@cli_util.option('--all', 'all_pages', is_flag=True, help="""Fetches all pages of results. If you provide this option, then you cannot provide the --limit option.""")
@cli_util.option('--page-size', type=click.INT, help="""When fetching results, the number of results to fetch per call. Only valid when used with --all or --limit, and ignored otherwise.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={}, output_type={'module': 'healthchecks', 'class': 'list[HttpMonitorSummary]'})
@cli_util.wrap_exceptions
def list_http_monitors(ctx, from_json, all_pages, page_size, compartment_id, limit, page, sort_by, sort_order, display_name):
    """List HTTP monitors in a compartment, either one page, up to --limit results, or every page with --all."""
    if all_pages and limit:
        raise click.UsageError('If you provide the --all option you cannot provide the --limit option')

    # Forward only the options the caller actually supplied.
    kwargs = {}
    for param, value in (
        ('limit', limit),
        ('page', page),
        ('sort_by', sort_by),
        ('sort_order', sort_order),
        ('display_name', display_name),
    ):
        if value is not None:
            kwargs[param] = value
    kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])

    client = cli_util.build_client('health_checks', ctx)
    if all_pages:
        # --page-size overrides the per-call limit while auto-paginating.
        if page_size:
            kwargs['limit'] = page_size
        result = cli_util.list_call_get_all_results(client.list_http_monitors, compartment_id=compartment_id, **kwargs)
    elif limit is not None:
        result = cli_util.list_call_get_up_to_limit(client.list_http_monitors, limit, page_size, compartment_id=compartment_id, **kwargs)
    else:
        result = client.list_http_monitors(compartment_id=compartment_id, **kwargs)
    cli_util.render_response(result, ctx)
@http_probe_result_group.command(name=cli_util.override('list_http_probe_results.command_name', 'list'), help=u"""Gets the HTTP probe results for the specified probe or monitor, where the `probeConfigurationId` is the OCID of either a monitor or an on-demand probe.""")
@cli_util.option('--probe-configuration-id', required=True, help=u"""The OCID of a monitor or on-demand probe.""")
@cli_util.option('--limit', type=click.INT, help=u"""The maximum number of items to return in a paginated \"List\" call.""")
@cli_util.option('--page', help=u"""The value of the `opc-next-page` response header from the previous \"List\" call.""")
@cli_util.option('--start-time-greater-than-or-equal-to', help=u"""Returns results with a `startTime` equal to or greater than the specified value.""")
@cli_util.option('--start-time-less-than-or-equal-to', help=u"""Returns results with a `startTime` equal to or less than the specified value.""")
@cli_util.option('--sort-order', type=custom_types.CliCaseInsensitiveChoice(["ASC", "DESC"]), help=u"""Controls the sort order of results.""")
@cli_util.option('--target', help=u"""Filters results that match the `target`.""")
@cli_util.option('--all', 'all_pages', is_flag=True, help="""Fetches all pages of results. If you provide this option, then you cannot provide the --limit option.""")
@cli_util.option('--page-size', type=click.INT, help="""When fetching results, the number of results to fetch per call. Only valid when used with --all or --limit, and ignored otherwise.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={}, output_type={'module': 'healthchecks', 'class': 'list[HttpProbeResultSummary]'})
@cli_util.wrap_exceptions
def list_http_probe_results(ctx, from_json, all_pages, page_size, probe_configuration_id, limit, page, start_time_greater_than_or_equal_to, start_time_less_than_or_equal_to, sort_order, target):
    """List HTTP probe results for a monitor or on-demand probe, with optional time/target filters."""
    if all_pages and limit:
        raise click.UsageError('If you provide the --all option you cannot provide the --limit option')
    # Reject blank identifiers locally instead of sending a doomed request.
    if isinstance(probe_configuration_id, six.string_types) and not probe_configuration_id.strip():
        raise click.UsageError('Parameter --probe-configuration-id cannot be whitespace or empty string')

    # Forward only the options the caller actually supplied.
    kwargs = {}
    for param, value in (
        ('limit', limit),
        ('page', page),
        ('start_time_greater_than_or_equal_to', start_time_greater_than_or_equal_to),
        ('start_time_less_than_or_equal_to', start_time_less_than_or_equal_to),
        ('sort_order', sort_order),
        ('target', target),
    ):
        if value is not None:
            kwargs[param] = value
    kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])

    client = cli_util.build_client('health_checks', ctx)
    if all_pages:
        # --page-size overrides the per-call limit while auto-paginating.
        if page_size:
            kwargs['limit'] = page_size
        result = cli_util.list_call_get_all_results(client.list_http_probe_results, probe_configuration_id=probe_configuration_id, **kwargs)
    elif limit is not None:
        result = cli_util.list_call_get_up_to_limit(client.list_http_probe_results, limit, page_size, probe_configuration_id=probe_configuration_id, **kwargs)
    else:
        result = client.list_http_probe_results(probe_configuration_id=probe_configuration_id, **kwargs)
    cli_util.render_response(result, ctx)
@ping_monitor_group.command(name=cli_util.override('list_ping_monitors.command_name', 'list'), help=u"""Gets a list of configured ping monitors.
Results are paginated based on `page` and `limit`. The `opc-next-page` header provides a URL for fetching the next page.""")
@cli_util.option('--compartment-id', required=True, help=u"""Filters results by compartment.""")
@cli_util.option('--limit', type=click.INT, help=u"""The maximum number of items to return in a paginated \"List\" call.""")
@cli_util.option('--page', help=u"""The value of the `opc-next-page` response header from the previous \"List\" call.""")
@cli_util.option('--sort-by', type=custom_types.CliCaseInsensitiveChoice(["id", "displayName"]), help=u"""The field to sort by when listing monitors.""")
@cli_util.option('--sort-order', type=custom_types.CliCaseInsensitiveChoice(["ASC", "DESC"]), help=u"""Controls the sort order of results.""")
@cli_util.option('--display-name', help=u"""Filters results that exactly match the `displayName` field.""")
@cli_util.option('--all', 'all_pages', is_flag=True, help="""Fetches all pages of results. If you provide this option, then you cannot provide the --limit option.""")
@cli_util.option('--page-size', type=click.INT, help="""When fetching results, the number of results to fetch per call. Only valid when used with --all or --limit, and ignored otherwise.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={}, output_type={'module': 'healthchecks', 'class': 'list[PingMonitorSummary]'})
@cli_util.wrap_exceptions
def list_ping_monitors(ctx, from_json, all_pages, page_size, compartment_id, limit, page, sort_by, sort_order, display_name):
    """List the ping monitors in a compartment, optionally fetching every page."""
    if all_pages and limit:
        raise click.UsageError('If you provide the --all option you cannot provide the --limit option')
    # Forward only the optional query parameters the caller actually supplied.
    optional_params = {
        'limit': limit,
        'page': page,
        'sort_by': sort_by,
        'sort_order': sort_order,
        'display_name': display_name,
    }
    kwargs = {name: value for name, value in optional_params.items() if value is not None}
    kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
    client = cli_util.build_client('health_checks', ctx)
    if all_pages:
        # With --all, --page-size only controls the per-call batch size.
        if page_size:
            kwargs['limit'] = page_size
        response = cli_util.list_call_get_all_results(
            client.list_ping_monitors,
            compartment_id=compartment_id,
            **kwargs
        )
    elif limit is not None:
        response = cli_util.list_call_get_up_to_limit(
            client.list_ping_monitors,
            limit,
            page_size,
            compartment_id=compartment_id,
            **kwargs
        )
    else:
        response = client.list_ping_monitors(
            compartment_id=compartment_id,
            **kwargs
        )
    cli_util.render_response(response, ctx)
@ping_probe_result_group.command(name=cli_util.override('list_ping_probe_results.command_name', 'list'), help=u"""Returns the results for the specified probe, where the `probeConfigurationId` is the OCID of either a monitor or an on-demand probe.
Results are paginated based on `page` and `limit`. The `opc-next-page` header provides a URL for fetching the next page. Use `sortOrder` to set the order of the results. If `sortOrder` is unspecified, results are sorted in ascending order by `startTime`.""")
@cli_util.option('--probe-configuration-id', required=True, help=u"""The OCID of a monitor or on-demand probe.""")
@cli_util.option('--limit', type=click.INT, help=u"""The maximum number of items to return in a paginated \"List\" call.""")
@cli_util.option('--page', help=u"""The value of the `opc-next-page` response header from the previous \"List\" call.""")
@cli_util.option('--start-time-greater-than-or-equal-to', help=u"""Returns results with a `startTime` equal to or greater than the specified value.""")
@cli_util.option('--start-time-less-than-or-equal-to', help=u"""Returns results with a `startTime` equal to or less than the specified value.""")
@cli_util.option('--sort-order', type=custom_types.CliCaseInsensitiveChoice(["ASC", "DESC"]), help=u"""Controls the sort order of results.""")
@cli_util.option('--target', help=u"""Filters results that match the `target`.""")
@cli_util.option('--all', 'all_pages', is_flag=True, help="""Fetches all pages of results. If you provide this option, then you cannot provide the --limit option.""")
@cli_util.option('--page-size', type=click.INT, help="""When fetching results, the number of results to fetch per call. Only valid when used with --all or --limit, and ignored otherwise.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={}, output_type={'module': 'healthchecks', 'class': 'list[PingProbeResultSummary]'})
@cli_util.wrap_exceptions
def list_ping_probe_results(ctx, from_json, all_pages, page_size, probe_configuration_id, limit, page, start_time_greater_than_or_equal_to, start_time_less_than_or_equal_to, sort_order, target):
    """List ping probe results for a monitor or an on-demand probe."""
    if all_pages and limit:
        raise click.UsageError('If you provide the --all option you cannot provide the --limit option')
    # A required OCID made of only whitespace is as bad as a missing one.
    if isinstance(probe_configuration_id, six.string_types) and not probe_configuration_id.strip():
        raise click.UsageError('Parameter --probe-configuration-id cannot be whitespace or empty string')
    # Forward only the optional filters the caller actually supplied.
    optional_params = {
        'limit': limit,
        'page': page,
        'start_time_greater_than_or_equal_to': start_time_greater_than_or_equal_to,
        'start_time_less_than_or_equal_to': start_time_less_than_or_equal_to,
        'sort_order': sort_order,
        'target': target,
    }
    kwargs = {name: value for name, value in optional_params.items() if value is not None}
    kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
    client = cli_util.build_client('health_checks', ctx)
    if all_pages:
        # With --all, --page-size only controls the per-call batch size.
        if page_size:
            kwargs['limit'] = page_size
        response = cli_util.list_call_get_all_results(
            client.list_ping_probe_results,
            probe_configuration_id=probe_configuration_id,
            **kwargs
        )
    elif limit is not None:
        response = cli_util.list_call_get_up_to_limit(
            client.list_ping_probe_results,
            limit,
            page_size,
            probe_configuration_id=probe_configuration_id,
            **kwargs
        )
    else:
        response = client.list_ping_probe_results(
            probe_configuration_id=probe_configuration_id,
            **kwargs
        )
    cli_util.render_response(response, ctx)
@http_monitor_group.command(name=cli_util.override('update_http_monitor.command_name', 'update'), help=u"""Updates the configuration of the specified HTTP monitor. Only the fields specified in the request body will be updated; all other configuration properties will remain unchanged.""")
@cli_util.option('--monitor-id', required=True, help=u"""The OCID of a monitor.""")
@cli_util.option('--targets', type=custom_types.CLI_COMPLEX_TYPE, help=u"""""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--vantage-point-names', type=custom_types.CLI_COMPLEX_TYPE, help=u"""""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--port', type=click.INT, help=u"""The port on which to probe endpoints. If unspecified, probes will use the default port of their protocol.""")
@cli_util.option('--timeout-in-seconds', type=click.INT, help=u"""The probe timeout in seconds. Valid values: 10, 20, 30, and 60. The probe timeout must be less than or equal to `intervalInSeconds` for monitors.""")
@cli_util.option('--protocol', type=custom_types.CliCaseInsensitiveChoice(["HTTP", "HTTPS"]), help=u"""""")
@cli_util.option('--method', type=custom_types.CliCaseInsensitiveChoice(["GET", "HEAD"]), help=u"""""")
@cli_util.option('--path', help=u"""The optional URL path to probe, including query parameters.""")
@cli_util.option('--headers', type=custom_types.CLI_COMPLEX_TYPE, help=u"""A dictionary of HTTP request headers.
*Note:* Monitors and probes do not support the use of the `Authorization` HTTP header.""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--display-name', help=u"""A user-friendly and mutable name suitable for display in a user interface.""")
@cli_util.option('--interval-in-seconds', type=click.INT, help=u"""The monitor interval in seconds. Valid values: 10, 30, and 60.""")
@cli_util.option('--is-enabled', type=click.BOOL, help=u"""Enables or disables the monitor. Set to 'true' to launch monitoring.""")
@cli_util.option('--freeform-tags', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags]. Example: `{\"Department\": \"Finance\"}`""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--defined-tags', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags]. Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--if-match', help=u"""For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the etag from a previous GET or POST response for that resource. The resource will be updated or deleted only if the etag you provide matches the resource's current etag value.""")
@cli_util.option('--force', help="""Perform update without prompting for confirmation.""", is_flag=True)
@json_skeleton_utils.get_cli_json_input_option({'targets': {'module': 'healthchecks', 'class': 'list[string]'}, 'vantage-point-names': {'module': 'healthchecks', 'class': 'list[string]'}, 'headers': {'module': 'healthchecks', 'class': 'dict(str, string)'}, 'freeform-tags': {'module': 'healthchecks', 'class': 'dict(str, string)'}, 'defined-tags': {'module': 'healthchecks', 'class': 'dict(str, dict(str, object))'}})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'targets': {'module': 'healthchecks', 'class': 'list[string]'}, 'vantage-point-names': {'module': 'healthchecks', 'class': 'list[string]'}, 'headers': {'module': 'healthchecks', 'class': 'dict(str, string)'}, 'freeform-tags': {'module': 'healthchecks', 'class': 'dict(str, string)'}, 'defined-tags': {'module': 'healthchecks', 'class': 'dict(str, dict(str, object))'}}, output_type={'module': 'healthchecks', 'class': 'HttpMonitor'})
@cli_util.wrap_exceptions
def update_http_monitor(ctx, from_json, force, monitor_id, targets, vantage_point_names, port, timeout_in_seconds, protocol, method, path, headers, display_name, interval_in_seconds, is_enabled, freeform_tags, defined_tags, if_match):
    """Update an HTTP monitor; only fields the caller supplied are sent."""
    # A required OCID made of only whitespace is as bad as a missing one.
    if isinstance(monitor_id, six.string_types) and not monitor_id.strip():
        raise click.UsageError('Parameter --monitor-id cannot be whitespace or empty string')
    # Complex-typed options replace (not merge) existing values, so confirm first.
    if not force and (targets or vantage_point_names or headers or freeform_tags or defined_tags):
        if not click.confirm("WARNING: Updates to targets and vantage-point-names and headers and freeform-tags and defined-tags will replace any existing values. Are you sure you want to continue?"):
            ctx.abort()
    kwargs = {}
    if if_match is not None:
        kwargs['if_match'] = if_match
    kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
    # (payload key, CLI value, parameter name for JSON parsing or None for scalars)
    field_specs = (
        ('targets', targets, 'targets'),
        ('vantagePointNames', vantage_point_names, 'vantage_point_names'),
        ('port', port, None),
        ('timeoutInSeconds', timeout_in_seconds, None),
        ('protocol', protocol, None),
        ('method', method, None),
        ('path', path, None),
        ('headers', headers, 'headers'),
        ('displayName', display_name, None),
        ('intervalInSeconds', interval_in_seconds, None),
        ('isEnabled', is_enabled, None),
        ('freeformTags', freeform_tags, 'freeform_tags'),
        ('definedTags', defined_tags, 'defined_tags'),
    )
    details = {}
    for key, value, json_name in field_specs:
        if value is None:
            continue
        details[key] = cli_util.parse_json_parameter(json_name, value) if json_name else value
    client = cli_util.build_client('health_checks', ctx)
    response = client.update_http_monitor(
        monitor_id=monitor_id,
        update_http_monitor_details=details,
        **kwargs
    )
    cli_util.render_response(response, ctx)
@ping_monitor_group.command(name=cli_util.override('update_ping_monitor.command_name', 'update'), help=u"""Updates the configuration of the specified ping monitor. Only the fields specified in the request body will be updated; all other configuration properties will remain unchanged.""")
@cli_util.option('--monitor-id', required=True, help=u"""The OCID of a monitor.""")
@cli_util.option('--targets', type=custom_types.CLI_COMPLEX_TYPE, help=u"""""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--vantage-point-names', type=custom_types.CLI_COMPLEX_TYPE, help=u"""""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--port', type=click.INT, help=u"""The port on which to probe endpoints. If unspecified, probes will use the default port of their protocol.""")
@cli_util.option('--timeout-in-seconds', type=click.INT, help=u"""The probe timeout in seconds. Valid values: 10, 20, 30, and 60. The probe timeout must be less than or equal to `intervalInSeconds` for monitors.""")
@cli_util.option('--protocol', type=custom_types.CliCaseInsensitiveChoice(["ICMP", "TCP"]), help=u"""""")
@cli_util.option('--display-name', help=u"""A user-friendly and mutable name suitable for display in a user interface.""")
@cli_util.option('--interval-in-seconds', type=click.INT, help=u"""The monitor interval in seconds. Valid values: 10, 30, and 60.""")
@cli_util.option('--is-enabled', type=click.BOOL, help=u"""Enables or disables the monitor. Set to 'true' to launch monitoring.""")
@cli_util.option('--freeform-tags', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags]. Example: `{\"Department\": \"Finance\"}`""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--defined-tags', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags]. Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--if-match', help=u"""For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the etag from a previous GET or POST response for that resource. The resource will be updated or deleted only if the etag you provide matches the resource's current etag value.""")
@cli_util.option('--force', help="""Perform update without prompting for confirmation.""", is_flag=True)
@json_skeleton_utils.get_cli_json_input_option({'targets': {'module': 'healthchecks', 'class': 'list[string]'}, 'vantage-point-names': {'module': 'healthchecks', 'class': 'list[string]'}, 'freeform-tags': {'module': 'healthchecks', 'class': 'dict(str, string)'}, 'defined-tags': {'module': 'healthchecks', 'class': 'dict(str, dict(str, object))'}})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'targets': {'module': 'healthchecks', 'class': 'list[string]'}, 'vantage-point-names': {'module': 'healthchecks', 'class': 'list[string]'}, 'freeform-tags': {'module': 'healthchecks', 'class': 'dict(str, string)'}, 'defined-tags': {'module': 'healthchecks', 'class': 'dict(str, dict(str, object))'}}, output_type={'module': 'healthchecks', 'class': 'PingMonitor'})
@cli_util.wrap_exceptions
def update_ping_monitor(ctx, from_json, force, monitor_id, targets, vantage_point_names, port, timeout_in_seconds, protocol, display_name, interval_in_seconds, is_enabled, freeform_tags, defined_tags, if_match):
    """Update a ping monitor; only fields the caller supplied are sent."""
    # A required OCID made of only whitespace is as bad as a missing one.
    if isinstance(monitor_id, six.string_types) and not monitor_id.strip():
        raise click.UsageError('Parameter --monitor-id cannot be whitespace or empty string')
    # Complex-typed options replace (not merge) existing values, so confirm first.
    if not force and (targets or vantage_point_names or freeform_tags or defined_tags):
        if not click.confirm("WARNING: Updates to targets and vantage-point-names and freeform-tags and defined-tags will replace any existing values. Are you sure you want to continue?"):
            ctx.abort()
    kwargs = {}
    if if_match is not None:
        kwargs['if_match'] = if_match
    kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
    # (payload key, CLI value, parameter name for JSON parsing or None for scalars)
    field_specs = (
        ('targets', targets, 'targets'),
        ('vantagePointNames', vantage_point_names, 'vantage_point_names'),
        ('port', port, None),
        ('timeoutInSeconds', timeout_in_seconds, None),
        ('protocol', protocol, None),
        ('displayName', display_name, None),
        ('intervalInSeconds', interval_in_seconds, None),
        ('isEnabled', is_enabled, None),
        ('freeformTags', freeform_tags, 'freeform_tags'),
        ('definedTags', defined_tags, 'defined_tags'),
    )
    details = {}
    for key, value, json_name in field_specs:
        if value is None:
            continue
        details[key] = cli_util.parse_json_parameter(json_name, value) if json_name else value
    client = cli_util.build_client('health_checks', ctx)
    response = client.update_ping_monitor(
        monitor_id=monitor_id,
        update_ping_monitor_details=details,
        **kwargs
    )
    cli_util.render_response(response, ctx)
| 63.656577
| 518
| 0.732905
| 7,790
| 54,681
| 4.910141
| 0.047497
| 0.048131
| 0.040444
| 0.01966
| 0.962876
| 0.948993
| 0.936654
| 0.918301
| 0.900261
| 0.891059
| 0
| 0.002901
| 0.136318
| 54,681
| 858
| 519
| 63.730769
| 0.807017
| 0.002652
| 0
| 0.805085
| 0
| 0.079096
| 0.393779
| 0.031727
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032486
| false
| 0.032486
| 0.015537
| 0
| 0.048023
| 0.001412
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e6aec4b6a4e2f2414802b79885a709706e3bcc1e
| 14,983
|
py
|
Python
|
ironic_neutron_plugin/tests/unit/test_plugin.py
|
rackerlabs/ironic-neutron-plugin
|
7b3e19840048bc49d846362b84973c2f2b03b05e
|
[
"Apache-2.0"
] | 10
|
2015-01-21T22:04:40.000Z
|
2017-06-29T06:55:45.000Z
|
ironic_neutron_plugin/tests/unit/test_plugin.py
|
rackerlabs/ironic-neutron-plugin
|
7b3e19840048bc49d846362b84973c2f2b03b05e
|
[
"Apache-2.0"
] | null | null | null |
ironic_neutron_plugin/tests/unit/test_plugin.py
|
rackerlabs/ironic-neutron-plugin
|
7b3e19840048bc49d846362b84973c2f2b03b05e
|
[
"Apache-2.0"
] | 8
|
2015-01-30T16:40:30.000Z
|
2020-07-23T06:06:53.000Z
|
# Copyright (c) 2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ironic_neutron_plugin.tests import base
from neutron.tests.unit import test_db_plugin
"""
These are mostly copied wholesale or subclassed where possible
from the upstream ml2 tests.
TODO(morgabra) These aren't very 'unit-y' unit tests.
"""
class TestIronicBasicGet(base.IronicMl2MechanismTestCase,
                         test_db_plugin.TestBasicGet):
    """Run the upstream basic-GET test suite against the Ironic ML2 mechanism."""
    pass
class TestIronicV2HTTPResponse(base.IronicMl2MechanismTestCase,
                               test_db_plugin.TestV2HTTPResponse):
    """Run the upstream v2 HTTP-response test suite against the Ironic ML2 mechanism."""
    pass
class TestIronicNetworksV2(base.IronicMl2MechanismTestCase,
                           test_db_plugin.TestNetworksV2):
    """Run the upstream v2 networks test suite against the Ironic ML2 mechanism."""
    pass
class TestIronicSubnetsV2(base.IronicMl2MechanismTestCase,
                          test_db_plugin.TestSubnetsV2):
    """Run the upstream v2 subnets test suite against the Ironic ML2 mechanism."""
    pass
class TestIronicPortsV2(base.IronicMl2MechanismTestCase,
                        test_db_plugin.TestPortsV2):
    """Run the upstream v2 ports test suite against the Ironic ML2 mechanism."""
    pass
class TestIronicPlugin(base.IronicMl2MechanismTestCase):
    """Plugin tests that exercise the 'switch', 'commit',
    and 'trunked' extensions.
    """

    # Consumed by the base test case; presumably makes setUp() create the
    # dummy fixtures used below (self.switch1/2, self.net1/2,
    # self.hardware_id, self.hw_driver) — TODO(review): confirm in
    # base.IronicMl2MechanismTestCase.
    _dummy_data = True

    def test_create_no_commit_with_no_switchports(self):
        # commit=False with no switchports: create succeeds and the hardware
        # driver is never invoked.
        port = self._make_port_with_switchports(
            network=self.net1['network']['id'],
            commit=False)
        self.assertEqual(
            port['port']['network_id'], self.net1['network']['id'])
        self.assertHWDriverNotCalled()

    def test_create_commit_with_no_switchports_raises(self):
        # commit=True without switchports is rejected (HTTP 400); hardware
        # is untouched.
        self._make_port_with_switchports(
            network=self.net1['network']['id'],
            commit=True,
            expected_status_code=400)
        self.assertHWDriverNotCalled()

    def test_create_commit_no_hardware_id(self):
        # Supplying switch:ports without switch:hardware_id must 400 with an
        # explanatory message.
        switchports = self._make_switchports(
            self.fmt, [self.switch1, self.switch2],
            self.hardware_id, ['eth1/1', 'eth1/1'], ['eth0', 'eth1']
        )
        ports = []
        if switchports:
            for swp in switchports['switchports']:
                ports.append(self._make_switchport_req(swp))
        res = self._create_port(
            self.fmt,
            self.net1['network']['id'],
            context=self.context,
            arg_list=('trunked', 'commit', 'switch:ports'),
            **{
                'trunked': False,
                'commit': True,
                'switch:ports': ports,
            }
        )
        self.assertEqual(res.status_code, 400)
        self.assertTrue(
            "switch:ports requires switch:hardware_id" in res.body)

    def test_create_commit_with_switchports(self):
        """Base sanity test for port_create()."""
        switchports = self._make_switchports(
            self.fmt, [self.switch1, self.switch2],
            self.hardware_id, ['eth1/1', 'eth1/1'], ['eth0', 'eth1']
        )
        port = self._make_port_with_switchports(
            network=self.net1['network']['id'],
            switchports=switchports,
            commit=True)
        # assert the response returned extension data
        self.assertEqual(port['port']['switch:hardware_id'],
                         self.hardware_id)
        self.assertEqual(len(port['port']['switch:ports']), 2)
        self.assertContains(switchports['switchports'][0],
                            port['port']['switch:ports'][0])
        self.assertContains(switchports['switchports'][1],
                            port['port']['switch:ports'][1])
        self.assertEqual(port['port']['commit'], True)
        self.assertEqual(port['port']['trunked'], False)
        # assert that the switch hardware driver was called for
        # each switchport
        self.assertHWDriverNotCalled(exclude='create')
        self.assertEqual(self.hw_driver.create.call_count, 2)

    def test_create_no_commit_with_switchports(self):
        # Same as above but commit=False: extension data is persisted and
        # echoed back, yet no hardware-driver calls are made.
        switchports = self._make_switchports(
            self.fmt, [self.switch1, self.switch2],
            self.hardware_id, ['eth1/1', 'eth1/1'], ['eth0', 'eth1']
        )
        port = self._make_port_with_switchports(
            network=self.net1['network']['id'],
            switchports=switchports)
        # assert the response returned extension data
        self.assertEqual(port['port']['switch:hardware_id'],
                         self.hardware_id)
        self.assertEqual(len(port['port']['switch:ports']), 2)
        self.assertContains(switchports['switchports'][0],
                            port['port']['switch:ports'][0])
        self.assertContains(switchports['switchports'][1],
                            port['port']['switch:ports'][1])
        self.assertEqual(port['port']['commit'], False)
        self.assertEqual(port['port']['trunked'], False)
        self.assertHWDriverNotCalled()

    def test_update_commit_no_switchports_raises(self):
        # Flipping commit to True on a port that has no switchports is a 400.
        port = self._make_port_with_switchports(
            network=self.net1['network']['id'],
            commit=False)
        req = self.new_update_request(
            resource='ports',
            data={"port": {"commit": True}},
            id=port['port']['id'])
        res = req.get_response(self.api)
        self.assertEqual(res.status_code, 400)
        self.assertTrue("no switchports found" in res.body)
        self.assertHWDriverNotCalled()

    def test_update_commit_with_switchports(self):
        # Flipping commit False -> True while supplying switchports and a
        # hardware_id succeeds and creates both switchports on hardware.
        switchports = self._make_switchports(
            self.fmt, [self.switch1, self.switch2],
            self.hardware_id, ['eth1/1', 'eth1/1'], ['eth0', 'eth1']
        )
        ports = []
        for swp in switchports['switchports']:
            ports.append(self._make_switchport_req(swp))
        port = self._make_port_with_switchports(
            network=self.net1['network']['id'],
            commit=False)
        req = self.new_update_request(
            resource='ports',
            data={"port": {"commit": True,
                           "switch:ports": ports,
                           "switch:hardware_id": self.hardware_id}},
            id=port['port']['id'])
        res = req.get_response(self.api)
        res_body = res.json.copy()
        self.assertEqual(res.status_code, 200)
        self.assertNotEqual(port['port']['commit'], res_body['port']['commit'])
        # everything except 'commit' should be unchanged by the update
        port['port'].pop('commit')
        res_body['port'].pop('commit')
        self.assertContains(port['port'], res_body['port'])
        self.assertHWDriverNotCalled(exclude='create')
        self.assertEqual(self.hw_driver.create.call_count, 2)

    def test_update_trunked(self):
        # The trunked flag can be changed freely while the port is
        # uncommitted, with no hardware interaction.
        port = self._make_port_with_switchports(
            network=self.net1['network']['id'],
            commit=False)
        req = self.new_update_request(
            resource='ports',
            data={"port": {"trunked": True}},
            id=port['port']['id'])
        res = req.get_response(self.api)
        self.assertEqual(res.status_code, 200)
        self.assertNotEqual(port['port']['trunked'],
                            res.json['port']['trunked'])
        self.assertHWDriverNotCalled()

    def test_update_trunked_when_comitted_raises(self):
        # Once a port is committed its trunked flag is immutable: the update
        # 400s and no additional hardware calls happen beyond the initial
        # create.
        switchports = self._make_switchports(
            self.fmt, [self.switch1, self.switch2],
            self.hardware_id, ['eth1/1', 'eth1/1'], ['eth0', 'eth1']
        )
        port = self._make_port_with_switchports(
            network=self.net1['network']['id'],
            switchports=switchports,
            commit=True)
        self.assertHWDriverNotCalled(exclude='create')
        self.assertEqual(self.hw_driver.create.call_count, 2)
        req = self.new_update_request(resource='ports',
                                      data={"port": {"trunked": True}},
                                      id=port['port']['id'])
        res = req.get_response(self.api)
        self.assertEqual(res.status_code, 400)
        self.assertTrue("cannot update trunked flag" in res.body)
        self.assertHWDriverNotCalled(exclude='create')
        self.assertEqual(self.hw_driver.create.call_count, 2)

    def test_attach_trunked_to_trunked(self):
        # Two trunked networks can share the same switchports: the second
        # commit attaches (rather than creates) on both switchports.
        switchports = self._make_switchports(
            self.fmt, [self.switch1, self.switch2],
            self.hardware_id, ['eth1/1', 'eth1/1'], ['eth0', 'eth1']
        )
        port1 = self._make_port_with_switchports(
            network=self.net1['network']['id'],
            switchports=switchports,
            trunked=True,
            commit=True)
        self.assertHWDriverNotCalled(exclude='create')
        self.assertEqual(self.hw_driver.create.call_count, 2)
        port2 = self._make_port_with_switchports(
            network=self.net2['network']['id'],
            switchports=switchports,
            trunked=True,
            commit=True)
        self.assertHWDriverNotCalled(exclude=['create', 'attach'])
        self.assertEqual(self.hw_driver.create.call_count, 2)
        self.assertEqual(self.hw_driver.attach.call_count, 2)
        self.assertEqual(port1['port']['network_id'],
                         self.net1['network']['id'])
        self.assertEqual(port1['port']['commit'], True)
        self.assertEqual(port1['port']['trunked'], True)
        self.assertEqual(port2['port']['network_id'],
                         self.net2['network']['id'])
        self.assertEqual(port2['port']['commit'], True)
        self.assertEqual(port2['port']['trunked'], True)

    def test_attach_trunked_to_not_trunked(self):
        # A non-trunked attach onto switchports already bound to a trunked
        # port is rejected with a 400 and no extra hardware calls.
        switchports = self._make_switchports(
            self.fmt, [self.switch1, self.switch2],
            self.hardware_id, ['eth1/1', 'eth1/1'], ['eth0', 'eth1']
        )
        port1 = self._make_port_with_switchports(
            network=self.net1['network']['id'],
            switchports=switchports,
            trunked=True,
            commit=True)
        self.assertHWDriverNotCalled(exclude='create')
        self.assertEqual(self.hw_driver.create.call_count, 2)
        port2 = self._make_port_with_switchports(
            network=self.net2['network']['id'],
            switchports=switchports,
            trunked=False,
            commit=True,
            expected_status_code=400)
        self.assertHWDriverNotCalled(exclude='create')
        self.assertEqual(self.hw_driver.create.call_count, 2)
        self.assertEqual(port1['port']['network_id'],
                         self.net1['network']['id'])
        self.assertEqual(port1['port']['commit'], True)
        self.assertEqual(port1['port']['trunked'], True)
        msg = 'Cannot attach non-trunked network, port already bound'
        self.assertTrue(msg in str(port2))

    def test_attach_not_trunked_to_trunked(self):
        # A trunked attach onto switchports already bound non-trunked is
        # likewise rejected.
        switchports = self._make_switchports(
            self.fmt, [self.switch1, self.switch2],
            self.hardware_id, ['eth1/1', 'eth1/1'], ['eth0', 'eth1']
        )
        port1 = self._make_port_with_switchports(
            network=self.net1['network']['id'],
            switchports=switchports,
            trunked=False,
            commit=True)
        self.assertHWDriverNotCalled(exclude='create')
        self.assertEqual(self.hw_driver.create.call_count, 2)
        port2 = self._make_port_with_switchports(
            network=self.net2['network']['id'],
            switchports=switchports,
            trunked=True,
            commit=True,
            expected_status_code=400)
        self.assertHWDriverNotCalled(exclude='create')
        self.assertEqual(self.hw_driver.create.call_count, 2)
        self.assertEqual(port1['port']['network_id'],
                         self.net1['network']['id'])
        self.assertEqual(port1['port']['commit'], True)
        self.assertEqual(port1['port']['trunked'], False)
        self.assertTrue('Already attached via non-trunked port' in str(port2))

    def test_attach_not_trunked_to_not_trunked(self):
        # Two non-trunked networks cannot share switchports: the second
        # commit 400s.
        switchports = self._make_switchports(
            self.fmt, [self.switch1, self.switch2],
            self.hardware_id, ['eth1/1', 'eth1/1'], ['eth0', 'eth1']
        )
        port1 = self._make_port_with_switchports(
            network=self.net1['network']['id'],
            switchports=switchports,
            trunked=False,
            commit=True)
        self.assertHWDriverNotCalled(exclude='create')
        self.assertEqual(self.hw_driver.create.call_count, 2)
        port2 = self._make_port_with_switchports(
            network=self.net2['network']['id'],
            switchports=switchports,
            trunked=False,
            commit=True,
            expected_status_code=400)
        self.assertHWDriverNotCalled(exclude='create')
        self.assertEqual(self.hw_driver.create.call_count, 2)
        self.assertEqual(port1['port']['network_id'],
                         self.net1['network']['id'])
        self.assertEqual(port1['port']['commit'], True)
        self.assertEqual(port1['port']['trunked'], False)
        msg = 'Cannot attach non-trunked network, port already bound'
        self.assertTrue(msg in str(port2))

    def test_delete(self):
        # Deleting the first of two trunked ports detaches on hardware;
        # deleting the last one also triggers hardware delete.
        switchports = self._make_switchports(
            self.fmt, [self.switch1, self.switch2],
            self.hardware_id, ['eth1/1', 'eth1/1'], ['eth0', 'eth1']
        )
        port1 = self._make_port_with_switchports(
            network=self.net1['network']['id'],
            switchports=switchports,
            trunked=True,
            commit=True)
        self.assertHWDriverNotCalled(exclude='create')
        self.assertEqual(self.hw_driver.create.call_count, 2)
        port2 = self._make_port_with_switchports(
            network=self.net2['network']['id'],
            switchports=switchports,
            trunked=True,
            commit=True)
        self.assertHWDriverNotCalled(exclude=['create', 'attach'])
        self.assertEqual(self.hw_driver.create.call_count, 2)
        self.assertEqual(self.hw_driver.attach.call_count, 2)
        self._delete('ports', port1['port']['id'])
        self.assertHWDriverNotCalled(exclude=['create', 'attach', 'detach'])
        self.assertEqual(self.hw_driver.create.call_count, 2)
        self.assertEqual(self.hw_driver.attach.call_count, 2)
        self.assertEqual(self.hw_driver.detach.call_count, 2)
        self._delete('ports', port2['port']['id'])
        self.assertEqual(self.hw_driver.create.call_count, 2)
        self.assertEqual(self.hw_driver.attach.call_count, 2)
        self.assertEqual(self.hw_driver.detach.call_count, 2)
        self.assertEqual(self.hw_driver.delete.call_count, 2)
| 37.740554
| 79
| 0.607088
| 1,607
| 14,983
| 5.485376
| 0.120722
| 0.088486
| 0.049575
| 0.054793
| 0.810096
| 0.757005
| 0.720023
| 0.720023
| 0.702552
| 0.698582
| 0
| 0.019764
| 0.263832
| 14,983
| 396
| 80
| 37.835859
| 0.77942
| 0.055596
| 0
| 0.728188
| 0
| 0
| 0.104807
| 0
| 0
| 0
| 0
| 0.002525
| 0.285235
| 1
| 0.04698
| false
| 0.016779
| 0.006711
| 0
| 0.077181
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e6b28173f2465b185617fd1a59e4abb404f9de37
| 7,595
|
py
|
Python
|
neural_sp/models/seq2seq/encoders/build.py
|
ernie-mlg/neural_sp
|
103020c987c37d7d1ff281c9258810d122e55615
|
[
"Apache-2.0"
] | 2
|
2021-01-25T02:55:09.000Z
|
2021-02-05T03:47:05.000Z
|
neural_sp/models/seq2seq/encoders/build.py
|
ernie-mlg/neural_sp
|
103020c987c37d7d1ff281c9258810d122e55615
|
[
"Apache-2.0"
] | null | null | null |
neural_sp/models/seq2seq/encoders/build.py
|
ernie-mlg/neural_sp
|
103020c987c37d7d1ff281c9258810d122e55615
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Select an encoder network."""
def build_encoder(args):
    """Build and return the encoder module selected by ``args.enc_type``.

    Dispatch order: ``'tds'`` -> TDSEncoder, ``'gated_conv'`` ->
    GatedConvEncoder, any type containing ``'transformer'`` ->
    TransformerEncoder, any type containing ``'conformer'`` ->
    ConformerEncoder, otherwise RNNEncoder.

    Args:
        args: parsed configuration namespace holding all model
            hyperparameters, accessed by attribute.

    Returns:
        The constructed encoder instance.
    """
    # safeguard: older configs expose a single shared set of transformer_*
    # hyperparameters; mirror them onto the newer encoder/decoder-specific
    # attribute names when those are missing so old checkpoints still load.
    if not hasattr(args, 'transformer_enc_d_model') and hasattr(args, 'transformer_d_model'):
        args.transformer_enc_d_model = args.transformer_d_model
        args.transformer_dec_d_model = args.transformer_d_model
    if not hasattr(args, 'transformer_enc_d_ff') and hasattr(args, 'transformer_d_ff'):
        args.transformer_enc_d_ff = args.transformer_d_ff
    if not hasattr(args, 'transformer_enc_n_heads') and hasattr(args, 'transformer_n_heads'):
        args.transformer_enc_n_heads = args.transformer_n_heads

    # Encoder classes are imported lazily inside each branch so that only the
    # selected implementation (and its dependencies) is actually loaded.
    if args.enc_type == 'tds':
        from neural_sp.models.seq2seq.encoders.tds import TDSEncoder
        encoder = TDSEncoder(
            input_dim=args.input_dim * args.n_stacks,
            in_channel=args.conv_in_channel,
            channels=args.conv_channels,
            kernel_sizes=args.conv_kernel_sizes,
            dropout=args.dropout_enc,
            # Project encoder outputs to the decoder width when a transformer
            # decoder is used; otherwise match the RNN decoder units.
            last_proj_dim=args.transformer_dec_d_model if 'transformer' in args.dec_type else args.dec_n_units)
    elif args.enc_type == 'gated_conv':
        from neural_sp.models.seq2seq.encoders.gated_conv import GatedConvEncoder
        encoder = GatedConvEncoder(
            input_dim=args.input_dim * args.n_stacks,
            in_channel=args.conv_in_channel,
            channels=args.conv_channels,
            kernel_sizes=args.conv_kernel_sizes,
            dropout=args.dropout_enc,
            last_proj_dim=args.transformer_dec_d_model if 'transformer' in args.dec_type else args.dec_n_units,
            param_init=args.param_init)
    elif 'transformer' in args.enc_type:
        from neural_sp.models.seq2seq.encoders.transformer import TransformerEncoder
        encoder = TransformerEncoder(
            # Text input (e.g. translation) uses the embedding size instead
            # of the acoustic feature dimension.
            input_dim=args.input_dim if args.input_type == 'speech' else args.emb_dim,
            enc_type=args.enc_type,
            n_heads=args.transformer_enc_n_heads,
            n_layers=args.enc_n_layers,
            n_layers_sub1=args.enc_n_layers_sub1,
            n_layers_sub2=args.enc_n_layers_sub2,
            d_model=args.transformer_enc_d_model,
            d_ff=args.transformer_enc_d_ff,
            ffn_bottleneck_dim=args.transformer_ffn_bottleneck_dim,
            ffn_activation=args.transformer_ffn_activation,
            pe_type=args.transformer_enc_pe_type,
            layer_norm_eps=args.transformer_layer_norm_eps,
            # 0 disables the extra projection when the decoder is not a
            # transformer (the RNN decoder bridges dimensions itself).
            last_proj_dim=args.transformer_dec_d_model if 'transformer' in args.dec_type else 0,
            dropout_in=args.dropout_in,
            dropout=args.dropout_enc,
            dropout_att=args.dropout_att,
            dropout_layer=args.dropout_enc_layer,
            subsample=args.subsample,
            subsample_type=args.subsample_type,
            n_stacks=args.n_stacks,
            n_splices=args.n_splices,
            conv_in_channel=args.conv_in_channel,
            conv_channels=args.conv_channels,
            conv_kernel_sizes=args.conv_kernel_sizes,
            conv_strides=args.conv_strides,
            conv_poolings=args.conv_poolings,
            conv_batch_norm=args.conv_batch_norm,
            conv_layer_norm=args.conv_layer_norm,
            conv_bottleneck_dim=args.conv_bottleneck_dim,
            conv_param_init=args.param_init,
            task_specific_layer=args.task_specific_layer,
            param_init=args.transformer_param_init,
            clamp_len=args.transformer_enc_clamp_len,
            lookahead=args.transformer_enc_lookaheads,
            # lc_* options configure latency-controlled (streaming) encoding.
            chunk_size_left=args.lc_chunk_size_left,
            chunk_size_current=args.lc_chunk_size_current,
            chunk_size_right=args.lc_chunk_size_right,
            streaming_type=args.lc_type)
    elif 'conformer' in args.enc_type:
        from neural_sp.models.seq2seq.encoders.conformer import ConformerEncoder
        encoder = ConformerEncoder(
            input_dim=args.input_dim if args.input_type == 'speech' else args.emb_dim,
            enc_type=args.enc_type,
            n_heads=args.transformer_enc_n_heads,
            kernel_size=args.conformer_kernel_size,
            normalization=args.conformer_normalization,
            n_layers=args.enc_n_layers,
            n_layers_sub1=args.enc_n_layers_sub1,
            n_layers_sub2=args.enc_n_layers_sub2,
            d_model=args.transformer_enc_d_model,
            d_ff=args.transformer_enc_d_ff,
            ffn_bottleneck_dim=args.transformer_ffn_bottleneck_dim,
            # Conformer always uses the swish activation, regardless of the
            # configured transformer_ffn_activation.
            ffn_activation='swish',
            pe_type=args.transformer_enc_pe_type,
            layer_norm_eps=args.transformer_layer_norm_eps,
            last_proj_dim=args.transformer_dec_d_model if 'transformer' in args.dec_type else 0,
            dropout_in=args.dropout_in,
            dropout=args.dropout_enc,
            dropout_att=args.dropout_att,
            dropout_layer=args.dropout_enc_layer,
            subsample=args.subsample,
            subsample_type=args.subsample_type,
            n_stacks=args.n_stacks,
            n_splices=args.n_splices,
            conv_in_channel=args.conv_in_channel,
            conv_channels=args.conv_channels,
            conv_kernel_sizes=args.conv_kernel_sizes,
            conv_strides=args.conv_strides,
            conv_poolings=args.conv_poolings,
            conv_batch_norm=args.conv_batch_norm,
            conv_layer_norm=args.conv_layer_norm,
            conv_bottleneck_dim=args.conv_bottleneck_dim,
            conv_param_init=args.param_init,
            task_specific_layer=args.task_specific_layer,
            param_init=args.transformer_param_init,
            clamp_len=args.transformer_enc_clamp_len,
            lookahead=args.transformer_enc_lookaheads,
            chunk_size_left=args.lc_chunk_size_left,
            chunk_size_current=args.lc_chunk_size_current,
            chunk_size_right=args.lc_chunk_size_right,
            streaming_type=args.lc_type)
    else:
        # Fallback: any other enc_type (e.g. 'blstm', 'conv_blstm') is
        # handled by the RNN encoder.
        from neural_sp.models.seq2seq.encoders.rnn import RNNEncoder
        encoder = RNNEncoder(
            input_dim=args.input_dim if args.input_type == 'speech' else args.emb_dim,
            enc_type=args.enc_type,
            n_units=args.enc_n_units,
            n_projs=args.enc_n_projs,
            last_proj_dim=args.transformer_dec_d_model if 'transformer' in args.dec_type else 0,
            n_layers=args.enc_n_layers,
            n_layers_sub1=args.enc_n_layers_sub1,
            n_layers_sub2=args.enc_n_layers_sub2,
            dropout_in=args.dropout_in,
            dropout=args.dropout_enc,
            subsample=args.subsample,
            subsample_type=args.subsample_type,
            n_stacks=args.n_stacks,
            n_splices=args.n_splices,
            conv_in_channel=args.conv_in_channel,
            conv_channels=args.conv_channels,
            conv_kernel_sizes=args.conv_kernel_sizes,
            conv_strides=args.conv_strides,
            conv_poolings=args.conv_poolings,
            conv_batch_norm=args.conv_batch_norm,
            conv_layer_norm=args.conv_layer_norm,
            conv_bottleneck_dim=args.conv_bottleneck_dim,
            bidir_sum_fwd_bwd=args.bidirectional_sum_fwd_bwd,
            task_specific_layer=args.task_specific_layer,
            param_init=args.param_init,
            chunk_size_current=args.lc_chunk_size_left,  # for compatibility
            chunk_size_right=args.lc_chunk_size_right,
            cnn_lookahead=args.cnn_lookahead,
            rsp_prob=args.rsp_prob_enc)
    return encoder
| 47.767296
| 111
| 0.682686
| 998
| 7,595
| 4.758517
| 0.113226
| 0.120025
| 0.068225
| 0.026532
| 0.837861
| 0.819752
| 0.776163
| 0.738892
| 0.731733
| 0.722678
| 0
| 0.004905
| 0.248321
| 7,595
| 158
| 112
| 48.06962
| 0.82694
| 0.021593
| 0
| 0.736111
| 0
| 0
| 0.031124
| 0.006198
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006944
| false
| 0
| 0.034722
| 0
| 0.048611
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e6d0cb14468a6f0896bb2286005a6fa5ca0d26c0
| 101
|
py
|
Python
|
panda_gym/envs/robots/__init__.py
|
lubiluk/panda-gym
|
3f70c0306938ca0684268590c9c036070dbf7cb9
|
[
"MIT"
] | null | null | null |
panda_gym/envs/robots/__init__.py
|
lubiluk/panda-gym
|
3f70c0306938ca0684268590c9c036070dbf7cb9
|
[
"MIT"
] | null | null | null |
panda_gym/envs/robots/__init__.py
|
lubiluk/panda-gym
|
3f70c0306938ca0684268590c9c036070dbf7cb9
|
[
"MIT"
] | null | null | null |
from panda_gym.envs.robots.panda import Panda
from panda_gym.envs.robots.free_panda import FreePanda
| 33.666667
| 54
| 0.861386
| 17
| 101
| 4.941176
| 0.470588
| 0.214286
| 0.285714
| 0.380952
| 0.52381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.079208
| 101
| 2
| 55
| 50.5
| 0.903226
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
e6e2a06501ac6c103f8faf547e29a917b359aec0
| 24,000
|
py
|
Python
|
tests/components/hive/test_config_flow.py
|
orcema/core
|
ce144bf63145813c76fbbe4f9423341764695057
|
[
"Apache-2.0"
] | null | null | null |
tests/components/hive/test_config_flow.py
|
orcema/core
|
ce144bf63145813c76fbbe4f9423341764695057
|
[
"Apache-2.0"
] | null | null | null |
tests/components/hive/test_config_flow.py
|
orcema/core
|
ce144bf63145813c76fbbe4f9423341764695057
|
[
"Apache-2.0"
] | null | null | null |
"""Test the Hive config flow."""
from unittest.mock import patch
from apyhiveapi.helper import hive_exceptions
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.hive.const import CONF_CODE, CONF_DEVICE_NAME, DOMAIN
from homeassistant.const import CONF_PASSWORD, CONF_SCAN_INTERVAL, CONF_USERNAME
from tests.common import MockConfigEntry
USERNAME = "username@home-assistant.com"
UPDATED_USERNAME = "updated_username@home-assistant.com"
PASSWORD = "test-password"
UPDATED_PASSWORD = "updated-password"
INCORRECT_PASSWORD = "incorrect-password"
SCAN_INTERVAL = 120
UPDATED_SCAN_INTERVAL = 60
DEVICE_NAME = "Test Home Assistant"
MFA_CODE = "1234"
MFA_RESEND_CODE = "0000"
MFA_INVALID_CODE = "HIVE"
async def test_import_flow(hass):
    """Check import flow."""
    # Stub a successful Hive login and the component setup hooks so the
    # YAML-import source can create an entry without reaching the real API.
    with patch(
        "homeassistant.components.hive.config_flow.Auth.login",
        return_value={
            "ChallengeName": "SUCCESS",
            "AuthenticationResult": {
                "RefreshToken": "mock-refresh-token",
                "AccessToken": "mock-access-token",
            },
        },
    ), patch(
        "homeassistant.components.hive.async_setup", return_value=True
    ) as mock_setup, patch(
        "homeassistant.components.hive.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
        )

    # The created entry must store the credentials plus the auth tokens.
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == USERNAME
    assert result["data"] == {
        CONF_USERNAME: USERNAME,
        CONF_PASSWORD: PASSWORD,
        "tokens": {
            "AuthenticationResult": {
                "AccessToken": "mock-access-token",
                "RefreshToken": "mock-refresh-token",
            },
            "ChallengeName": "SUCCESS",
        },
    }
    assert len(hass.config_entries.async_entries(DOMAIN)) == 1
    assert len(mock_setup.mock_calls) == 1
    assert len(mock_setup_entry.mock_calls) == 1


async def test_user_flow(hass):
    """Test the user flow."""
    # Starting a user-sourced flow first shows an empty credentials form.
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {}

    # Submitting valid credentials with a stubbed successful login must
    # create the entry and trigger component setup.
    with patch(
        "homeassistant.components.hive.config_flow.Auth.login",
        return_value={
            "ChallengeName": "SUCCESS",
            "AuthenticationResult": {
                "RefreshToken": "mock-refresh-token",
                "AccessToken": "mock-access-token",
            },
        },
    ), patch(
        "homeassistant.components.hive.async_setup", return_value=True
    ) as mock_setup, patch(
        "homeassistant.components.hive.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
        )
        await hass.async_block_till_done()

    assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result2["title"] == USERNAME
    assert result2["data"] == {
        CONF_USERNAME: USERNAME,
        CONF_PASSWORD: PASSWORD,
        "tokens": {
            "AuthenticationResult": {
                "AccessToken": "mock-access-token",
                "RefreshToken": "mock-refresh-token",
            },
            "ChallengeName": "SUCCESS",
        },
    }
    assert len(mock_setup.mock_calls) == 1
    assert len(mock_setup_entry.mock_calls) == 1
    assert len(hass.config_entries.async_entries(DOMAIN)) == 1
async def test_user_flow_2fa(hass):
    """Test user flow with 2FA."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {}

    # Step 1: login answers with an SMS_MFA challenge -> the flow must show
    # the 2FA code form instead of creating an entry.
    with patch(
        "homeassistant.components.hive.config_flow.Auth.login",
        return_value={
            "ChallengeName": "SMS_MFA",
        },
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                CONF_USERNAME: USERNAME,
                CONF_PASSWORD: PASSWORD,
            },
        )

    assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result2["step_id"] == CONF_CODE
    assert result2["errors"] == {}

    # Step 2: a valid 2FA code moves the flow on to device configuration.
    with patch(
        "homeassistant.components.hive.config_flow.Auth.sms_2fa",
        return_value={
            "ChallengeName": "SUCCESS",
            "AuthenticationResult": {
                "RefreshToken": "mock-refresh-token",
                "AccessToken": "mock-access-token",
            },
        },
    ):
        result3 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                CONF_CODE: MFA_CODE,
            },
        )

    assert result3["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result3["step_id"] == "configuration"
    assert result3["errors"] == {}

    # Step 3: registering the device finishes the flow; the entry must hold
    # credentials, tokens and the registered device data.
    with patch(
        "homeassistant.components.hive.config_flow.Auth.device_registration",
        return_value=True,
    ), patch(
        "homeassistant.components.hive.config_flow.Auth.get_device_data",
        return_value=[
            "mock-device-group-key",
            "mock-device-key",
            "mock-device-password",
        ],
    ), patch(
        "homeassistant.components.hive.async_setup", return_value=True
    ) as mock_setup, patch(
        "homeassistant.components.hive.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result4 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                CONF_DEVICE_NAME: DEVICE_NAME,
            },
        )
        await hass.async_block_till_done()

    assert result4["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result4["title"] == USERNAME
    assert result4["data"] == {
        CONF_USERNAME: USERNAME,
        CONF_PASSWORD: PASSWORD,
        "tokens": {
            "AuthenticationResult": {
                "AccessToken": "mock-access-token",
                "RefreshToken": "mock-refresh-token",
            },
            "ChallengeName": "SUCCESS",
        },
        "device_data": [
            "mock-device-group-key",
            "mock-device-key",
            "mock-device-password",
        ],
    }
    assert len(mock_setup.mock_calls) == 1
    assert len(mock_setup_entry.mock_calls) == 1
    assert len(hass.config_entries.async_entries(DOMAIN)) == 1
async def test_reauth_flow(hass):
    """Test the reauth flow."""
    # Seed an existing entry whose stored password is wrong.
    mock_config = MockConfigEntry(
        domain=DOMAIN,
        unique_id=USERNAME,
        data={
            CONF_USERNAME: USERNAME,
            CONF_PASSWORD: INCORRECT_PASSWORD,
            "tokens": {
                "AccessToken": "mock-access-token",
                "RefreshToken": "mock-refresh-token",
            },
        },
    )
    mock_config.add_to_hass(hass)

    # Login fails with HiveInvalidPassword -> the reauth form is shown with
    # the matching error.
    with patch(
        "homeassistant.components.hive.config_flow.Auth.login",
        side_effect=hive_exceptions.HiveInvalidPassword(),
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={
                "source": config_entries.SOURCE_REAUTH,
                "unique_id": mock_config.unique_id,
            },
            data=mock_config.data,
        )

    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {"base": "invalid_password"}

    # Submitting the corrected password must update the entry in place and
    # abort the flow with "reauth_successful" (no duplicate entry).
    with patch(
        "homeassistant.components.hive.config_flow.Auth.login",
        return_value={
            "ChallengeName": "SUCCESS",
            "AuthenticationResult": {
                "RefreshToken": "mock-refresh-token",
                "AccessToken": "mock-access-token",
            },
        },
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                CONF_USERNAME: USERNAME,
                CONF_PASSWORD: UPDATED_PASSWORD,
            },
        )
        await hass.async_block_till_done()

    assert mock_config.data.get("username") == USERNAME
    assert mock_config.data.get("password") == UPDATED_PASSWORD
    assert result2["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result2["reason"] == "reauth_successful"
    assert len(hass.config_entries.async_entries(DOMAIN)) == 1


async def test_reauth_2fa_flow(hass):
    """Test the reauth flow."""
    # Same scenario as test_reauth_flow, but the corrected login triggers an
    # SMS_MFA challenge that must be answered before reauth completes.
    mock_config = MockConfigEntry(
        domain=DOMAIN,
        unique_id=USERNAME,
        data={
            CONF_USERNAME: USERNAME,
            CONF_PASSWORD: INCORRECT_PASSWORD,
            "tokens": {
                "AccessToken": "mock-access-token",
                "RefreshToken": "mock-refresh-token",
            },
        },
    )
    mock_config.add_to_hass(hass)

    with patch(
        "homeassistant.components.hive.config_flow.Auth.login",
        side_effect=hive_exceptions.HiveInvalidPassword(),
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={
                "source": config_entries.SOURCE_REAUTH,
                "unique_id": mock_config.unique_id,
            },
            data=mock_config.data,
        )

    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {"base": "invalid_password"}

    # New password accepted, but Hive asks for a 2FA code.
    with patch(
        "homeassistant.components.hive.config_flow.Auth.login",
        return_value={
            "ChallengeName": "SMS_MFA",
        },
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                CONF_USERNAME: USERNAME,
                CONF_PASSWORD: UPDATED_PASSWORD,
            },
        )

    # Supplying the 2FA code finishes reauth and reloads the entry.
    with patch(
        "homeassistant.components.hive.config_flow.Auth.sms_2fa",
        return_value={
            "ChallengeName": "SUCCESS",
            "AuthenticationResult": {
                "RefreshToken": "mock-refresh-token",
                "AccessToken": "mock-access-token",
            },
        },
    ), patch(
        "homeassistant.components.hive.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result3 = await hass.config_entries.flow.async_configure(
            result2["flow_id"],
            {
                CONF_CODE: MFA_CODE,
            },
        )
        await hass.async_block_till_done()

    assert mock_config.data.get("username") == USERNAME
    assert mock_config.data.get("password") == UPDATED_PASSWORD
    assert result3["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result3["reason"] == "reauth_successful"
    assert len(mock_setup_entry.mock_calls) == 1
    assert len(hass.config_entries.async_entries(DOMAIN)) == 1
async def test_option_flow(hass):
    """Test config flow options."""
    # Set up a configured entry so its options flow can be opened.
    entry = MockConfigEntry(
        domain=DOMAIN,
        title=USERNAME,
        data={
            CONF_USERNAME: USERNAME,
            CONF_PASSWORD: PASSWORD,
            "device_data": [
                "mock-device-group-key",
                "mock-device-key",
                "mock-device-password",
            ],
        },
    )
    entry.add_to_hass(hass)
    await hass.config_entries.async_setup(entry.entry_id)
    await hass.async_block_till_done()

    result = await hass.config_entries.options.async_init(
        entry.entry_id,
        data=None,
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"

    # Changing the scan interval must be persisted in the entry options.
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input={CONF_SCAN_INTERVAL: UPDATED_SCAN_INTERVAL}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["data"][CONF_SCAN_INTERVAL] == UPDATED_SCAN_INTERVAL
async def test_user_flow_2fa_send_new_code(hass):
    """Resend a 2FA code if it didn't arrive."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {}

    # Login triggers an SMS_MFA challenge -> 2FA code form.
    with patch(
        "homeassistant.components.hive.config_flow.Auth.login",
        return_value={
            "ChallengeName": "SMS_MFA",
        },
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                CONF_USERNAME: USERNAME,
                CONF_PASSWORD: PASSWORD,
            },
        )

    assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result2["step_id"] == CONF_CODE
    assert result2["errors"] == {}

    # Submitting the resend code re-runs login and must show the code form
    # again (still waiting for a fresh SMS code).
    with patch(
        "homeassistant.components.hive.config_flow.Auth.login",
        return_value={
            "ChallengeName": "SMS_MFA",
        },
    ):
        result3 = await hass.config_entries.flow.async_configure(
            result2["flow_id"], {CONF_CODE: MFA_RESEND_CODE}
        )
        await hass.async_block_till_done()

    assert result3["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result3["step_id"] == CONF_CODE
    assert result3["errors"] == {}

    # The newly received code is accepted and the flow moves on to device
    # configuration.
    with patch(
        "homeassistant.components.hive.config_flow.Auth.sms_2fa",
        return_value={
            "ChallengeName": "SUCCESS",
            "AuthenticationResult": {
                "RefreshToken": "mock-refresh-token",
                "AccessToken": "mock-access-token",
            },
        },
    ):
        result4 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                CONF_CODE: MFA_CODE,
            },
        )

    assert result4["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result4["step_id"] == "configuration"
    assert result4["errors"] == {}

    # Registering the device completes the flow and creates the entry.
    with patch(
        "homeassistant.components.hive.config_flow.Auth.device_registration",
        return_value=True,
    ), patch(
        "homeassistant.components.hive.config_flow.Auth.get_device_data",
        return_value=[
            "mock-device-group-key",
            "mock-device-key",
            "mock-device-password",
        ],
    ), patch(
        "homeassistant.components.hive.async_setup", return_value=True
    ) as mock_setup, patch(
        "homeassistant.components.hive.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result5 = await hass.config_entries.flow.async_configure(
            result4["flow_id"], {CONF_DEVICE_NAME: DEVICE_NAME}
        )
        await hass.async_block_till_done()

    assert result5["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result5["title"] == USERNAME
    assert result5["data"] == {
        CONF_USERNAME: USERNAME,
        CONF_PASSWORD: PASSWORD,
        "tokens": {
            "AuthenticationResult": {
                "AccessToken": "mock-access-token",
                "RefreshToken": "mock-refresh-token",
            },
            "ChallengeName": "SUCCESS",
        },
        "device_data": [
            "mock-device-group-key",
            "mock-device-key",
            "mock-device-password",
        ],
    }
    assert len(mock_setup.mock_calls) == 1
    assert len(mock_setup_entry.mock_calls) == 1
    assert len(hass.config_entries.async_entries(DOMAIN)) == 1
async def test_abort_if_existing_entry(hass):
    """Check flow abort when an entry already exist."""
    # An entry for these credentials is already configured.
    credentials = {
        CONF_USERNAME: USERNAME,
        CONF_PASSWORD: PASSWORD,
    }
    existing_entry = MockConfigEntry(
        domain=DOMAIN,
        unique_id=USERNAME,
        data=credentials,
        options={CONF_SCAN_INTERVAL: SCAN_INTERVAL},
    )
    existing_entry.add_to_hass(hass)

    # Starting a new user flow with the same account must abort immediately.
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": config_entries.SOURCE_USER},
        data=credentials,
    )

    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "already_configured"
async def test_user_flow_invalid_username(hass):
    """Test user flow with invalid username."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {}

    # HiveInvalidUsername from login must re-show the form with the
    # matching base error instead of raising.
    with patch(
        "homeassistant.components.hive.config_flow.Auth.login",
        side_effect=hive_exceptions.HiveInvalidUsername(),
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
        )

    assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result2["step_id"] == "user"
    assert result2["errors"] == {"base": "invalid_username"}


async def test_user_flow_invalid_password(hass):
    """Test user flow with invalid password."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {}

    # HiveInvalidPassword maps to the "invalid_password" form error.
    with patch(
        "homeassistant.components.hive.config_flow.Auth.login",
        side_effect=hive_exceptions.HiveInvalidPassword(),
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
        )

    assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result2["step_id"] == "user"
    assert result2["errors"] == {"base": "invalid_password"}


async def test_user_flow_no_internet_connection(hass):
    """Test user flow with no internet connection."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {}

    # A HiveApiError during login maps to "no_internet_available".
    with patch(
        "homeassistant.components.hive.config_flow.Auth.login",
        side_effect=hive_exceptions.HiveApiError(),
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
        )

    assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result2["step_id"] == "user"
    assert result2["errors"] == {"base": "no_internet_available"}
async def test_user_flow_2fa_no_internet_connection(hass):
    """Test user flow with no internet connection."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {}

    # Reach the 2FA code step first.
    with patch(
        "homeassistant.components.hive.config_flow.Auth.login",
        return_value={
            "ChallengeName": "SMS_MFA",
        },
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
        )

    assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result2["step_id"] == CONF_CODE
    assert result2["errors"] == {}

    # A HiveApiError while verifying the code keeps the user on the code
    # form with the "no_internet_available" error.
    with patch(
        "homeassistant.components.hive.config_flow.Auth.sms_2fa",
        side_effect=hive_exceptions.HiveApiError(),
    ):
        result3 = await hass.config_entries.flow.async_configure(
            result2["flow_id"],
            {CONF_CODE: MFA_CODE},
        )

    assert result3["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result3["step_id"] == CONF_CODE
    assert result3["errors"] == {"base": "no_internet_available"}


async def test_user_flow_2fa_invalid_code(hass):
    """Test user flow with 2FA."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {}

    with patch(
        "homeassistant.components.hive.config_flow.Auth.login",
        return_value={
            "ChallengeName": "SMS_MFA",
        },
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
        )

    assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result2["step_id"] == CONF_CODE
    assert result2["errors"] == {}

    # A rejected code (HiveInvalid2FACode) re-shows the code form with the
    # "invalid_code" error so the user can retry.
    with patch(
        "homeassistant.components.hive.config_flow.Auth.sms_2fa",
        side_effect=hive_exceptions.HiveInvalid2FACode(),
    ):
        result3 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_CODE: MFA_INVALID_CODE},
        )

    assert result3["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result3["step_id"] == CONF_CODE
    assert result3["errors"] == {"base": "invalid_code"}
async def test_user_flow_unknown_error(hass):
    """Test user flow when unknown error occurs."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {}

    # A login response that is neither SUCCESS nor a known challenge must
    # surface the generic "unknown" error.
    with patch(
        "homeassistant.components.hive.config_flow.Auth.login",
        return_value={"ChallengeName": "FAILED", "InvalidAuthenticationResult": {}},
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
        )
        await hass.async_block_till_done()

    assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result2["errors"] == {"base": "unknown"}


async def test_user_flow_2fa_unknown_error(hass):
    """Test 2fa flow when unknown error occurs."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {}

    with patch(
        "homeassistant.components.hive.config_flow.Auth.login",
        return_value={
            "ChallengeName": "SMS_MFA",
        },
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
        )

    assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result2["step_id"] == CONF_CODE

    # An unrecognised sms_2fa response still advances to the configuration
    # step (no error on the code form itself).
    with patch(
        "homeassistant.components.hive.config_flow.Auth.sms_2fa",
        return_value={"ChallengeName": "FAILED", "InvalidAuthenticationResult": {}},
    ):
        result3 = await hass.config_entries.flow.async_configure(
            result2["flow_id"],
            {CONF_CODE: MFA_CODE},
        )

    assert result3["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result3["step_id"] == "configuration"
    assert result3["errors"] == {}

    # Without valid tokens the final configuration step reports "unknown"
    # and keeps the user on the configuration form.
    with patch(
        "homeassistant.components.hive.config_flow.Auth.device_registration",
        return_value=True,
    ), patch(
        "homeassistant.components.hive.config_flow.Auth.get_device_data",
        return_value=[
            "mock-device-group-key",
            "mock-device-key",
            "mock-device-password",
        ],
    ):
        result4 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_DEVICE_NAME: DEVICE_NAME},
        )
        await hass.async_block_till_done()

    assert result4["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result4["step_id"] == "configuration"
    assert result4["errors"] == {"base": "unknown"}
| 32.476319
| 84
| 0.6195
| 2,539
| 24,000
| 5.578574
| 0.053958
| 0.055069
| 0.05401
| 0.060576
| 0.904406
| 0.888097
| 0.868187
| 0.859715
| 0.84856
| 0.838111
| 0
| 0.007399
| 0.262292
| 24,000
| 738
| 85
| 32.520325
| 0.792601
| 0.001083
| 0
| 0.711039
| 0
| 0
| 0.208316
| 0.094992
| 0
| 0
| 0
| 0
| 0.180195
| 1
| 0
| false
| 0.066558
| 0.012987
| 0
| 0.012987
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
fc3f7bf9d07ec420d868862710f7a8bf26ca5192
| 26,736
|
py
|
Python
|
anuga/coordinate_transforms/tests/test_geo_reference.py
|
samcom12/anuga_core
|
f4378114dbf02d666fe6423de45798add5c42806
|
[
"Python-2.0",
"OLDAP-2.7"
] | null | null | null |
anuga/coordinate_transforms/tests/test_geo_reference.py
|
samcom12/anuga_core
|
f4378114dbf02d666fe6423de45798add5c42806
|
[
"Python-2.0",
"OLDAP-2.7"
] | null | null | null |
anuga/coordinate_transforms/tests/test_geo_reference.py
|
samcom12/anuga_core
|
f4378114dbf02d666fe6423de45798add5c42806
|
[
"Python-2.0",
"OLDAP-2.7"
] | null | null | null |
#!/usr/bin/env python
#
from builtins import zip
from builtins import str
import unittest
import tempfile
import os
from anuga.coordinate_transforms.geo_reference import *
from anuga.config import netcdf_mode_r, netcdf_mode_w, netcdf_mode_a
import numpy as num
class geo_referenceTestCase(unittest.TestCase):
    def setUp(self):
        # No shared fixtures: each test constructs its own Geo_reference.
        pass

    def tearDown(self):
        # Nothing to clean up here; tests remove their own temp files.
        pass
def test_get_origin(self):
g = Geo_reference(56,1.9,1.9)
(z,x,y) = g.get_origin()
self.assertTrue(z == g.get_zone(), ' failed')
self.assertTrue(x == g.get_xllcorner(), ' failed')
self.assertTrue(y == g.get_yllcorner(), ' failed')
    def test_read_write_NetCDF(self):
        """Round-trip a Geo_reference through a NetCDF file."""
        from anuga.file.netcdf import NetCDFFile

        g = Geo_reference(56,1.9,1.9)
        # NOTE(review): tempfile.mktemp is deprecated/insecure (race between
        # name creation and open) — consider NamedTemporaryFile.
        file_name = tempfile.mktemp(".geo_referenceTest")

        out_file = NetCDFFile(file_name, netcdf_mode_w)
        g.write_NetCDF(out_file)
        out_file.close()

        in_file = NetCDFFile(file_name, netcdf_mode_r)
        new_g = Geo_reference(NetCDFObject=in_file)
        in_file.close()
        os.remove(file_name)

        # Equality compares zone and corner offsets.
        self.assertTrue(g == new_g, 'test_read_write_NetCDF failed')

    def test_read_NetCDFI(self):
        # test if read_NetCDF
        # Same round-trip as above, reading back via the NetCDFObject kwarg.
        from anuga.file.netcdf import NetCDFFile

        g = Geo_reference(56,1.9,1.9)
        file_name = tempfile.mktemp(".geo_referenceTest")

        outfile = NetCDFFile(file_name, netcdf_mode_w)
        g.write_NetCDF(outfile)
        outfile.close()

        in_file = NetCDFFile(file_name, netcdf_mode_r)
        new_g = Geo_reference(NetCDFObject=in_file)
        in_file.close()
        os.remove(file_name)

        self.assertTrue(g == new_g, ' failed')

    def test_read_write_ASCII(self):
        """Round-trip a Geo_reference through an ASCII file."""
        from anuga.file.netcdf import NetCDFFile

        g = Geo_reference(56,1.9,1.9)
        file_name = tempfile.mktemp(".geo_referenceTest")
        fd = open(file_name,'w')
        g.write_ASCII(fd)
        fd.close()

        fd = open(file_name,'r')
        new_g = Geo_reference(ASCIIFile=fd)
        fd.close()
        os.remove(file_name)

        self.assertTrue(g == new_g, 'test_read_write_ASCII failed')

    def test_read_write_ASCII2(self):
        """ASCII round-trip where the title line is consumed by the caller."""
        from anuga.file.netcdf import NetCDFFile

        g = Geo_reference(56,1.9,1.9)
        file_name = tempfile.mktemp(".geo_referenceTest")
        fd = open(file_name,'w')
        g.write_ASCII(fd)
        fd.close()

        fd = open(file_name,'r')
        # The pre-read title line is handed over via read_title.
        line = fd.readline()
        new_g = Geo_reference(ASCIIFile=fd, read_title=line)
        fd.close()
        os.remove(file_name)

        self.assertTrue(g == new_g, 'test_read_write_ASCII failed')
def test_read_write_ASCII3(self):
from anuga.file.netcdf import NetCDFFile
g = Geo_reference(56,1.9,1.9)
file_name = tempfile.mktemp(".geo_referenceTest")
fd = open(file_name,'w')
g.write_ASCII(fd)
fd.close()
fd = open(file_name,'r')
line = fd.readline()
line = "fail !!"
try:
new_g = Geo_reference(ASCIIFile=fd, read_title=line)
fd.close()
os.remove(file_name)
except TitleError:
fd.close()
os.remove(file_name)
else:
self.assertTrue(0 ==1,
'bad text file did not raise error!')
def test_change_points_geo_ref(self):
x = 433.0
y = 3.0
g = Geo_reference(56,x,y)
lofl = [[3.0,311.0], [677.0,6.0]]
new_lofl = g.change_points_geo_ref(lofl)
self.assertTrue(isinstance(new_lofl, list), ' failed')
self.assertTrue(type(new_lofl) == type(lofl), ' failed')
for point,new_point in zip(lofl,new_lofl):
self.assertTrue(point[0]-x==new_point[0], ' failed')
self.assertTrue(point[1]-y==new_point[1], ' failed')
def test_change_points_geo_ref2(self):
x = 3.0
y = 543.0
g = Geo_reference(56,x,y)
lofl = [[3.0,388.0]]
new_lofl = g.change_points_geo_ref(lofl)
self.assertTrue(isinstance(new_lofl, list), ' failed')
self.assertTrue(type(new_lofl) == type(lofl), ' failed')
for point,new_point in zip(lofl,new_lofl):
self.assertTrue(point[0]-x==new_point[0], ' failed')
self.assertTrue(point[1]-y==new_point[1], ' failed')
def test_change_points_geo_ref3(self):
x = 3.0
y = 443.0
g = Geo_reference(56,x,y)
lofl = [3.0,345.0]
new_lofl = g.change_points_geo_ref(lofl)
self.assertTrue(isinstance(new_lofl, list), ' failed')
self.assertTrue(type(new_lofl) == type(lofl), ' failed')
for point,new_point in zip([lofl],new_lofl):
self.assertTrue(point[0]-x==new_point[0], ' failed')
self.assertTrue(point[1]-y==new_point[1], ' failed')
def test_change_points_geo_ref4(self):
x = 3.0
y = 443.0
g = Geo_reference(56,x,y)
lofl = num.array([[3.0,323.0], [6.0,645.0]])
new_lofl = g.change_points_geo_ref(lofl)
self.assertTrue(isinstance(new_lofl, num.ndarray), ' failed')
self.assertTrue(type(new_lofl) == type(lofl), ' failed')
lofl[:,0] -= x
lofl[:,1] -= y
assert num.allclose(lofl,new_lofl)
def test_change_points_geo_ref5(self):
    """A single nested point supplied as a numpy array."""
    xll, yll = 103.0, 3.0
    geo = Geo_reference(56, xll, yll)
    pts = num.array([[3.0, 323.0]])
    # pass a copy so 'pts' keeps the original coordinates for comparison
    shifted = geo.change_points_geo_ref(pts.copy())
    self.assertTrue(isinstance(shifted, num.ndarray), ' failed')
    self.assertTrue(type(shifted) == type(pts), ' failed')
    for old, new in zip(pts, shifted):
        self.assertTrue(old[0] - xll == new[0], ' failed')
        self.assertTrue(old[1] - yll == new[1], ' failed')
def test_change_points_geo_ref6(self):
    """A flat (1-D) numpy point is also accepted."""
    xll, yll = 53.0, 3.0
    geo = Geo_reference(56, xll, yll)
    pts = num.array([355.0, 3.0])
    # pass a copy so 'pts' keeps the original coordinates for comparison
    shifted = geo.change_points_geo_ref(pts.copy())
    self.assertTrue(isinstance(shifted, num.ndarray), ' failed')
    self.assertTrue(type(shifted) == type(pts), ' failed')
    # wrap the flat input so both sides iterate as one point
    for old, new in zip([pts], shifted):
        self.assertTrue(old[0] - xll == new[0], ' failed')
        self.assertTrue(old[1] - yll == new[1], ' failed')
def test_change_points_geo_ref7(self):
    """Points carrying their own georef are re-based onto g's georef."""
    xll, yll = 23.0, 3.0
    pts_x, pts_y = 9.0, -60.0
    geo = Geo_reference(56, xll, yll)
    points_geo_ref = Geo_reference(56, pts_x, pts_y)
    points = [[3.0, 30.0], [67.0, 6.0]]
    shifted = geo.change_points_geo_ref(points,
                                        points_geo_ref=points_geo_ref)
    self.assertTrue(isinstance(shifted, list), ' failed')
    self.assertTrue(type(shifted) == type(points), ' failed')
    # expected: absolute position (point + its georef) minus g's offsets
    for old, new in zip(points, shifted):
        self.assertTrue(old[0] + pts_x - xll == new[0], ' failed')
        self.assertTrue(old[1] + pts_y - yll == new[1], ' failed')
def test_get_absolute_list(self):
    """get_absolute on a list of points adds the georef offsets."""
    # explicit offsets
    xoff, yoff = 7.0, 3.0
    geo = Geo_reference(56, xoff, yoff)
    pts = [[3.0, 34.0], [64.0, 6.0]]
    abs_pts = geo.get_absolute(pts)
    self.assertTrue(isinstance(abs_pts, list), 'failed')
    self.assertTrue(type(abs_pts) == type(pts), 'failed')
    for old, new in zip(pts, abs_pts):
        self.assertTrue(old[0] + xoff == new[0], 'failed')
        self.assertTrue(old[1] + yoff == new[1], 'failed')

    # default georef: no offsets, points must come back unchanged
    geo = Geo_reference()
    pts = [[3.0, 34.0], [64.0, 6.0]]
    abs_pts = geo.get_absolute(pts)
    self.assertTrue(isinstance(abs_pts, list), 'failed')
    self.assertTrue(type(abs_pts) == type(pts), 'failed')
    for old, new in zip(pts, abs_pts):
        self.assertTrue(old[0] == new[0], 'failed')
        self.assertTrue(old[1] == new[1], 'failed')

    # calling get_absolute repeatedly with the same input must keep
    # giving the same (correct) answer
    dx, dy = 10.0, 12.0
    geo = Geo_reference(56, dx, dy)
    pts = [[3.0, 34.0], [64.0, 6.0]]
    expected = [[3.0 + dx, 34.0 + dy], [64.0 + dx, 6.0 + dy]]
    for _ in range(2):
        abs_pts = geo.get_absolute(pts)
        self.assertTrue(isinstance(abs_pts, list), 'failed')
        self.assertTrue(type(abs_pts) == type(pts), 'failed')
        self.assertTrue(abs_pts == expected, 'failed')
def test_get_absolute_array(self):
    '''Same test as test_get_absolute_list(), but with numeric arrays.'''
    # test with supplied offsets
    x = 7.0
    y = 3.0
    g = Geo_reference(56, x, y)
    points = num.array([[3.0, 34.0], [64.0, 6.0]])
    new_points = g.get_absolute(points)
    self.assertTrue(isinstance(new_points, num.ndarray), 'failed')
    self.assertTrue(type(new_points) == type(points), 'failed')
    msg = 'points=\n%s\nnew_points=\n%s' % (str(points), str(new_points))
    for point, new_point in zip(points, new_points):
        self.assertTrue(point[0] + x == new_point[0], msg)
        self.assertTrue(point[1] + y == new_point[1], msg)

    # test with no supplied offsets
    g = Geo_reference()
    points = num.array([[3.0, 34.0], [64.0, 6.0]])
    new_points = g.get_absolute(points)
    self.assertTrue(isinstance(new_points, num.ndarray), 'failed')
    self.assertTrue(type(new_points) == type(points), 'failed')
    # num.alltrue was removed in NumPy 2.0; num.all is the equivalent
    self.assertTrue(num.all(points == new_points), 'failed')

    # test that repeated calls of get_absolute with identical input
    # keep giving the same result (guards against in-place mutation)
    dx = 11.0
    dy = 13.0
    g = Geo_reference(56, dx, dy)
    points = num.array([[3.0, 34.0], [64.0, 6.0]])
    expected_new_points = num.array([[3.0+dx, 34.0+dy], [64.0+dx, 6.0+dy]])
    for call in ('First', 'Second', 'Third'):
        new_points = g.get_absolute(points)
        self.assertTrue(isinstance(new_points, num.ndarray), 'failed')
        self.assertTrue(type(new_points) == type(points), 'failed')
        msg = ('%s call of .get_absolute() returned %s\nexpected %s'
               % (call, str(new_points), str(expected_new_points)))
        self.assertTrue(num.all(expected_new_points == new_points), msg)
def test_get_relative_list(self):
    """get_relative on a list of points subtracts the georef offsets."""
    # explicit offsets
    xoff, yoff = 7.0, 3.0
    geo = Geo_reference(56, xoff, yoff)
    pts = [[3.0, 34.0], [64.0, 6.0]]
    rel_pts = geo.get_relative(pts)
    self.assertTrue(isinstance(rel_pts, list), 'failed')
    self.assertTrue(type(rel_pts) == type(pts), 'failed')
    for old, new in zip(pts, rel_pts):
        self.assertTrue(old[0] - xoff == new[0], 'failed')
        self.assertTrue(old[1] - yoff == new[1], 'failed')

    # default georef: no offsets, points must come back unchanged
    geo = Geo_reference()
    pts = [[3.0, 34.0], [64.0, 6.0]]
    rel_pts = geo.get_relative(pts)
    self.assertTrue(isinstance(rel_pts, list), 'failed')
    self.assertTrue(type(rel_pts) == type(pts), 'failed')
    for old, new in zip(pts, rel_pts):
        self.assertTrue(old[0] == new[0], 'failed')
        self.assertTrue(old[1] == new[1], 'failed')

    # calling get_relative repeatedly with the same input must keep
    # giving the same (correct) answer
    dx, dy = 10.0, 12.0
    geo = Geo_reference(56, dx, dy)
    pts = [[3.0, 34.0], [64.0, 6.0]]
    expected = [[3.0 - dx, 34.0 - dy], [64.0 - dx, 6.0 - dy]]
    for _ in range(2):
        rel_pts = geo.get_relative(pts)
        self.assertTrue(isinstance(rel_pts, list), 'failed')
        self.assertTrue(type(rel_pts) == type(pts), 'failed')
        self.assertTrue(rel_pts == expected, 'failed')
def test_get_relative_array(self):
    '''Same test as test_get_relative_list(), but with numeric arrays.'''
    # test with supplied offsets
    x = 7.0
    y = 3.0
    g = Geo_reference(56, x, y)
    points = num.array([[3.0, 34.0], [64.0, 6.0]])
    new_points = g.get_relative(points)
    self.assertTrue(isinstance(new_points, num.ndarray), 'failed')
    self.assertTrue(type(new_points) == type(points), 'failed')
    msg = 'points=\n%s\nnew_points=\n%s' % (str(points), str(new_points))
    for point, new_point in zip(points, new_points):
        self.assertTrue(point[0] - x == new_point[0], msg)
        self.assertTrue(point[1] - y == new_point[1], msg)

    # test with no supplied offsets
    g = Geo_reference()
    points = num.array([[3.0, 34.0], [64.0, 6.0]])
    new_points = g.get_relative(points)
    self.assertTrue(isinstance(new_points, num.ndarray), 'failed')
    self.assertTrue(type(new_points) == type(points), 'failed')
    # num.alltrue was removed in NumPy 2.0; num.all is the equivalent
    self.assertTrue(num.all(points == new_points), 'failed')

    # test that repeated calls of get_relative with identical input
    # keep giving the same result (guards against in-place mutation)
    dx = 11.0
    dy = 13.0
    g = Geo_reference(56, dx, dy)
    points = num.array([[3.0, 34.0], [64.0, 6.0]])
    expected_new_points = num.array([[3.0-dx, 34.0-dy], [64.0-dx, 6.0-dy]])
    for call in ('First', 'Second', 'Third'):
        new_points = g.get_relative(points)
        self.assertTrue(isinstance(new_points, num.ndarray), 'failed')
        self.assertTrue(type(new_points) == type(points), 'failed')
        msg = ('%s call of .get_relative() returned %s\nexpected %s'
               % (call, str(new_points), str(expected_new_points)))
        self.assertTrue(num.all(expected_new_points == new_points), msg)
def test_is_absolute(self):
    """is_absolute() is true only when both offsets are zero."""
    # (the original built a 'points' list that was never used; removed)
    g = Geo_reference(34, 0, 0)
    assert g.is_absolute()
    g = Geo_reference(34, 7, -6)
    assert not g.is_absolute()
def test___cmp__(self):
    """Two georeferences with identical zone and offsets compare equal."""
    first = Geo_reference(56, 1.9, 1.9)
    second = Geo_reference(56, 1.9, 1.9)
    self.assertTrue(first == second, 'test___cmp__ failed')
def test_reconcile(self):
    """reconcile_zones: a default zone adopts the other georef's zone;
    two different real zones must raise."""
    g1 = Geo_reference(56, 2, 5)
    g2 = Geo_reference(50, 4, 5)
    g3 = Geo_reference(50, 66, 6)
    g_default = Geo_reference()

    # same real zone: reconciling is a no-op
    g2.reconcile_zones(g3)
    assert g2.get_zone() == g3.get_zone()

    # default zone adopts the real one, in either call direction
    g_default.reconcile_zones(g3)
    assert g_default.get_zone() == g3.get_zone()

    g_default = Geo_reference()
    g3.reconcile_zones(g_default)
    assert g_default.get_zone() == g3.get_zone()

    # conflicting real zones (56 vs 50) must raise.  The original used a
    # bare 'except:' which would also have swallowed SystemExit and
    # KeyboardInterrupt; assertRaises is both safer and clearer.
    self.assertRaises(Exception, g1.reconcile_zones, g2)
def test_bad_ASCII_title(self):
    """A file whose title line is not '#geo...' must raise TitleError."""
    # create a text file with a bad title line
    point_file = tempfile.mktemp(".xxx")
    fd = open(point_file, 'w')
    fd.write("# hey! \n")
    fd.close()

    fd = open(point_file, 'r')
    # The original's trailing os.remove(point_file) ran after the except
    # branch had already removed the file, raising FileNotFoundError on
    # the expected (passing) path.  assertRaises + finally fixes both the
    # double remove and the file leak on unexpected exceptions.
    try:
        self.assertRaises(TitleError, Geo_reference, ASCIIFile=fd)
    finally:
        fd.close()
        os.remove(point_file)
def _ascii_title_roundtrip(self, read_title):
    """Write a Geo_reference to an ASCII temp file and re-read it.

    The re-read uses *read_title* instead of the real title line, so the
    caller can probe title validation.  Returns (original, re-read);
    the temp file is always removed, even when Geo_reference raises.
    """
    g = Geo_reference(56, 1.9, 1.9)
    file_name = tempfile.mktemp(".geo_referenceTest")
    fd = open(file_name, 'w')
    g.write_ASCII(fd)
    fd.close()

    fd = open(file_name, 'r')
    fd.readline()               # consume the real title line
    try:
        return g, Geo_reference(ASCIIFile=fd, read_title=read_title)
    finally:
        fd.close()
        os.remove(file_name)

def test_read_write_ASCII_test_and_fail(self):
    """read_title must start with '#geo' (leading junk fails; any
    suffix after '#geo' is accepted)."""
    # (the original repeated the write/read boilerplate three times and
    # imported NetCDFFile without using it; both cleaned up here)

    # This is to test a fail: leading space before '#Geo'
    self.assertRaises(TitleError, self._ascii_title_roundtrip, " #Geo")

    # this tests a pass
    g, new_g = self._ascii_title_roundtrip("#geo_yeah")
    self.assertTrue(g == new_g, 'test_read_write_ASCII failed')

    # this tests a pass
    g, new_g = self._ascii_title_roundtrip("#geo crap")
    self.assertTrue(g == new_g, 'test_read_write_ASCII failed')
def test_good_title(self):
    """A valid '#Geo' title followed by a malformed body must raise
    ValueError."""
    # create an .xxx file with a good title but bad content
    point_file = tempfile.mktemp(".xxx")
    fd = open(point_file, 'w')
    fd.write("#Geo crap \n 56\n ")
    fd.close()

    fd = open(point_file, 'r')
    # The original's trailing os.remove(point_file) ran after the except
    # branch had already removed the file, raising FileNotFoundError on
    # the expected (passing) path.  assertRaises + finally fixes both the
    # double remove and the file leak on unexpected exceptions.
    try:
        self.assertRaises(ValueError, Geo_reference, ASCIIFile=fd)
    finally:
        fd.close()
        os.remove(point_file)
def test_error_message_ShapeError(self):
    """get_absolute must raise ShapeError for a 3-element point."""
    # The original ran two identical probes (the second merely wrapped
    # the same tuple in redundant parentheses) and then executed
    # os.remove(point_file) on a name that is undefined in this method,
    # which would have raised NameError instead of failing cleanly.
    new_g = Geo_reference()
    self.assertRaises(ShapeError, new_g.get_absolute, (8.9, 7.8, 9.0))
def test_functionality_get_absolute(self):
    """get_absolute adds the offsets and must not mutate its input."""
    x0 = 1000.0
    y0 = 2000.0
    geo = Geo_reference(56, x0, y0)

    def check(points):
        # result differs from input (so the input was not shifted in
        # place), and subtracting the offsets recovers the input.
        # num.alltrue was removed in NumPy 2.0; num.all is equivalent.
        abs_points = geo.get_absolute(points)
        self.assertFalse(num.all(abs_points == points))
        shifted_back = abs_points.copy()
        shifted_back[:, 0] -= x0
        shifted_back[:, 1] -= y0
        self.assertTrue(num.all(shifted_back == points))

    # iterable points (*not* num.array())
    check(((2, 3), (3, 1), (5, 2)))
    # points in num.array()
    check(num.array(((2, 3), (3, 1), (5, 2)), float))
def test_georef_types(self):
    '''Ensure that attributes of a georeference are of correct type.

    zone            int
    false_easting   int
    false_northing  int
    xllcorner       float
    yllcorner       float
    '''
    from anuga.file.netcdf import NetCDFFile

    def check_types(ref):
        # one loop replaces ten near-identical assertions; the message
        # format is unified across all attributes
        for attr, expected in (('zone', int),
                               ('false_easting', int),
                               ('false_northing', int),
                               ('xllcorner', float),
                               ('yllcorner', float)):
            value = getattr(ref, attr)
            self.assertTrue(isinstance(value, expected),
                            "geo_ref .%s should be '%s' type, "
                            "was '%s' type"
                            % (attr, expected.__name__, type(value)))

    # ensure that basic instance attributes are correct
    g = Geo_reference(56, 1.8, 1.8)
    check_types(g)

    # now write file, read back and check types again
    file_name = tempfile.mktemp(".geo_referenceTest")
    out_file = NetCDFFile(file_name, netcdf_mode_w)
    g.write_NetCDF(out_file)
    out_file.close()

    in_file = NetCDFFile(file_name, netcdf_mode_r)
    new_g = Geo_reference(NetCDFObject=in_file)
    in_file.close()
    os.remove(file_name)
    check_types(new_g)
def test_georef_types_coerceable(self):
    '''Ensure that attributes of a georeference are of correct type.

    zone            int
    false_easting   int
    false_northing  int
    xllcorner       float
    yllcorner       float
    '''
    # provide wrong-but-coerceable types: float zone, string corners
    g = Geo_reference(56.0, '1.8', '1.8')
    # one loop replaces five near-identical assertions; the message
    # format is unified across all attributes
    for attr, expected in (('zone', int),
                           ('false_easting', int),
                           ('false_northing', int),
                           ('xllcorner', float),
                           ('yllcorner', float)):
        value = getattr(g, attr)
        self.assertTrue(isinstance(value, expected),
                        "geo_ref .%s should be '%s' type, "
                        "was '%s' type"
                        % (attr, expected.__name__, type(value)))
#-------------------------------------------------------------
if __name__ == "__main__":
    # unittest.makeSuite() is deprecated and removed in Python 3.13;
    # TestLoader.loadTestsFromTestCase is the supported replacement.
    suite = unittest.TestLoader().loadTestsFromTestCase(geo_referenceTestCase)
    runner = unittest.TextTestRunner()  # verbosity=2)
    runner.run(suite)
| 37.393007
| 78
| 0.572412
| 3,520
| 26,736
| 4.16108
| 0.069886
| 0.113743
| 0.04349
| 0.055301
| 0.884345
| 0.869257
| 0.856899
| 0.846931
| 0.839899
| 0.830409
| 0
| 0.030923
| 0.299671
| 26,736
| 714
| 79
| 37.445378
| 0.751335
| 0.080491
| 0
| 0.746154
| 0
| 0
| 0.096471
| 0.00877
| 0
| 0
| 0
| 0
| 0.244231
| 1
| 0.055769
| false
| 0.009615
| 0.028846
| 0
| 0.086538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fc7d08b5cb4f7680fa889586354849482f236afa
| 40,986
|
py
|
Python
|
sdk/keyvault/azure-keyvault-secrets/azure/keyvault/secrets/_generated/v7_3_preview/aio/operations/_key_vault_client_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/keyvault/azure-keyvault-secrets/azure/keyvault/secrets/_generated/v7_3_preview/aio/operations/_key_vault_client_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/keyvault/azure-keyvault-secrets/azure/keyvault/secrets/_generated/v7_3_preview/aio/operations/_key_vault_client_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class KeyVaultClientOperationsMixin:
async def set_secret(
    self,
    vault_base_url: str,
    secret_name: str,
    parameters: "_models.SecretSetParameters",
    **kwargs: Any
) -> "_models.SecretBundle":
    """Sets a secret in a specified key vault.

    The SET operation adds a secret to the Azure Key Vault. If the named secret already exists,
    Azure Key Vault creates a new version of that secret. This operation requires the secrets/set
    permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param secret_name: The name of the secret.
    :type secret_name: str
    :param parameters: The parameters for setting the secret.
    :type parameters: ~azure.keyvault.v7_3_preview.models.SecretSetParameters
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: SecretBundle, or the result of cls(response)
    :rtype: ~azure.keyvault.v7_3_preview.models.SecretBundle
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecretBundle"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "7.3-preview"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Build the request URL from the operation's metadata template.
    path_args = {
        'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
        'secret-name': self._serialize.url("secret_name", secret_name, 'str', pattern=r'^[0-9a-zA-Z-]+$'),
    }
    url = self._client.format_url(self.set_secret.metadata['url'], **path_args)  # type: ignore

    # Query string and headers.
    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    # Serialize the body and send a PUT through the pipeline.
    body = self._serialize.body(parameters, 'SecretSetParameters')
    request = self._client.put(url, query, headers, content=body)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
        raise HttpResponseError(response=response, model=error)

    deserialized = self._deserialize('SecretBundle', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized

set_secret.metadata = {'url': '/secrets/{secret-name}'}  # type: ignore
async def delete_secret(
    self,
    vault_base_url: str,
    secret_name: str,
    **kwargs: Any
) -> "_models.DeletedSecretBundle":
    """Deletes a secret from a specified key vault.

    The DELETE operation applies to any secret stored in Azure Key Vault. DELETE cannot be applied
    to an individual version of a secret. This operation requires the secrets/delete permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param secret_name: The name of the secret.
    :type secret_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DeletedSecretBundle, or the result of cls(response)
    :rtype: ~azure.keyvault.v7_3_preview.models.DeletedSecretBundle
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DeletedSecretBundle"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "7.3-preview"
    accept = "application/json"

    # Build the request URL from the operation's metadata template.
    path_args = {
        'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
        'secret-name': self._serialize.url("secret_name", secret_name, 'str'),
    }
    url = self._client.format_url(self.delete_secret.metadata['url'], **path_args)  # type: ignore

    # Query string and headers.
    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    # Send a DELETE through the pipeline.
    request = self._client.delete(url, query, headers)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
        raise HttpResponseError(response=response, model=error)

    deserialized = self._deserialize('DeletedSecretBundle', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized

delete_secret.metadata = {'url': '/secrets/{secret-name}'}  # type: ignore
async def update_secret(
    self,
    vault_base_url: str,
    secret_name: str,
    secret_version: str,
    parameters: "_models.SecretUpdateParameters",
    **kwargs: Any
) -> "_models.SecretBundle":
    """Updates the attributes associated with a specified secret in a given key vault.

    The UPDATE operation changes specified attributes of an existing stored secret. Attributes that
    are not specified in the request are left unchanged. The value of a secret itself cannot be
    changed. This operation requires the secrets/set permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param secret_name: The name of the secret.
    :type secret_name: str
    :param secret_version: The version of the secret.
    :type secret_version: str
    :param parameters: The parameters for update secret operation.
    :type parameters: ~azure.keyvault.v7_3_preview.models.SecretUpdateParameters
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: SecretBundle, or the result of cls(response)
    :rtype: ~azure.keyvault.v7_3_preview.models.SecretBundle
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecretBundle"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "7.3-preview"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Build the request URL from the operation's metadata template.
    path_args = {
        'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
        'secret-name': self._serialize.url("secret_name", secret_name, 'str'),
        'secret-version': self._serialize.url("secret_version", secret_version, 'str'),
    }
    url = self._client.format_url(self.update_secret.metadata['url'], **path_args)  # type: ignore

    # Query string and headers.
    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    # Serialize the body and send a PATCH through the pipeline.
    body = self._serialize.body(parameters, 'SecretUpdateParameters')
    request = self._client.patch(url, query, headers, content=body)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
        raise HttpResponseError(response=response, model=error)

    deserialized = self._deserialize('SecretBundle', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized

update_secret.metadata = {'url': '/secrets/{secret-name}/{secret-version}'}  # type: ignore
async def get_secret(
    self,
    vault_base_url: str,
    secret_name: str,
    secret_version: str,
    **kwargs: Any
) -> "_models.SecretBundle":
    """Get a specified secret from a given key vault.

    The GET operation is applicable to any secret stored in Azure Key Vault. This operation
    requires the secrets/get permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param secret_name: The name of the secret.
    :type secret_name: str
    :param secret_version: The version of the secret. This URI fragment is optional. If not
     specified, the latest version of the secret is returned.
    :type secret_version: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: SecretBundle, or the result of cls(response)
    :rtype: ~azure.keyvault.v7_3_preview.models.SecretBundle
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecretBundle"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "7.3-preview"
    accept = "application/json"

    # Build the request URL from the operation's metadata template.
    path_args = {
        'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
        'secret-name': self._serialize.url("secret_name", secret_name, 'str'),
        'secret-version': self._serialize.url("secret_version", secret_version, 'str'),
    }
    url = self._client.format_url(self.get_secret.metadata['url'], **path_args)  # type: ignore

    # Query string and headers.
    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    # Send a GET through the pipeline.
    request = self._client.get(url, query, headers)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
        raise HttpResponseError(response=response, model=error)

    deserialized = self._deserialize('SecretBundle', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized

get_secret.metadata = {'url': '/secrets/{secret-name}/{secret-version}'}  # type: ignore
def get_secrets(
    self,
    vault_base_url: str,
    maxresults: Optional[int] = None,
    **kwargs: Any
) -> AsyncIterable["_models.SecretListResult"]:
    """List secrets in a specified key vault.

    The Get Secrets operation is applicable to the entire vault. However, only the base secret
    identifier and its attributes are provided in the response. Individual secret versions are not
    listed in the response. This operation requires the secrets/list permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param maxresults: Maximum number of results to return in a page. If not specified, the service
     will return up to 25 results.
    :type maxresults: int
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either SecretListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.keyvault.v7_3_preview.models.SecretListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecretListResult"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "7.3-preview"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Headers and path arguments are shared by first and later pages.
        headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]
        path_args = {
            'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
        }
        if not next_link:
            # First page: expand the metadata URL and add query parameters.
            url = self._client.format_url(self.get_secrets.metadata['url'], **path_args)  # type: ignore
            query = {}  # type: Dict[str, Any]
            if maxresults is not None:
                query['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', maximum=25, minimum=1)
            query['api-version'] = self._serialize.query("api_version", api_version, 'str')
        else:
            # Later pages: the service-supplied link already carries the query.
            url = self._client.format_url(next_link, **path_args)
            query = {}  # type: Dict[str, Any]
        return self._client.get(url, query, headers)

    async def extract_data(pipeline_response):
        # Deserialize one page and hand back (next link, items).
        page = self._deserialize('SecretListResult', pipeline_response)
        items = page.value
        if cls:
            items = cls(items)
        return page.next_link or None, AsyncList(items)

    async def get_next(next_link=None):
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error)
        return pipeline_response

    return AsyncItemPaged(get_next, extract_data)

get_secrets.metadata = {'url': '/secrets'}  # type: ignore
def get_secret_versions(
    self,
    vault_base_url: str,
    secret_name: str,
    maxresults: Optional[int] = None,
    **kwargs: Any
) -> AsyncIterable["_models.SecretListResult"]:
    """List all versions of the specified secret.

    The full secret identifier and attributes are provided in the response. No values are returned
    for the secrets. This operation requires the secrets/list permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param secret_name: The name of the secret.
    :type secret_name: str
    :param maxresults: Maximum number of results to return in a page. If not specified, the service
     will return up to 25 results.
    :type maxresults: int
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either SecretListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.keyvault.v7_3_preview.models.SecretListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecretListResult"]
    # Map well-known failure status codes to typed azure-core exceptions;
    # callers may extend/override this via the ``error_map`` keyword.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "7.3-preview"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the GET request for either the first page (operation URL
        # template plus query parameters) or a continuation page (the
        # service-provided next_link, re-formatted with the vault base URL).
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            # Construct URL
            url = self.get_secret_versions.metadata['url']  # type: ignore
            path_format_arguments = {
                'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
                'secret-name': self._serialize.url("secret_name", secret_name, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            if maxresults is not None:
                # Server-side page size is clamped to [1, 25] by the serializer.
                query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', maximum=25, minimum=1)
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # next_link already carries the query string; no extra parameters.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            path_format_arguments = {
                'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
                'secret-name': self._serialize.url("secret_name", secret_name, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and return (continuation token, items).
        deserialized = self._deserialize('SecretListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page; anything other than 200 is surfaced as an error.
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error)
        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
get_secret_versions.metadata = {'url': '/secrets/{secret-name}/versions'}  # type: ignore
def get_deleted_secrets(
    self,
    vault_base_url: str,
    maxresults: Optional[int] = None,
    **kwargs: Any
) -> AsyncIterable["_models.DeletedSecretListResult"]:
    """Lists deleted secrets for the specified vault.

    The Get Deleted Secrets operation returns the secrets that have been deleted for a vault
    enabled for soft-delete. This operation requires the secrets/list permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param maxresults: Maximum number of results to return in a page. If not specified, the service
     will return up to 25 results.
    :type maxresults: int
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either DeletedSecretListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.keyvault.v7_3_preview.models.DeletedSecretListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DeletedSecretListResult"]
    # Map well-known failure status codes to typed azure-core exceptions;
    # callers may extend/override this via the ``error_map`` keyword.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "7.3-preview"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the GET request for the first page (URL template + query)
        # or for a continuation page (service-provided next_link).
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            # Construct URL
            url = self.get_deleted_secrets.metadata['url']  # type: ignore
            path_format_arguments = {
                'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            if maxresults is not None:
                # Server-side page size is clamped to [1, 25] by the serializer.
                query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', maximum=25, minimum=1)
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # next_link already carries the query string; no extra parameters.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            path_format_arguments = {
                'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
            }
            url = self._client.format_url(url, **path_format_arguments)
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and return (continuation token, items).
        deserialized = self._deserialize('DeletedSecretListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page; anything other than 200 is surfaced as an error.
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error)
        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
get_deleted_secrets.metadata = {'url': '/deletedsecrets'}  # type: ignore
async def get_deleted_secret(
    self,
    vault_base_url: str,
    secret_name: str,
    **kwargs: Any
) -> "_models.DeletedSecretBundle":
    """Gets the specified deleted secret.

    The Get Deleted Secret operation returns the specified deleted secret along with its
    attributes. This operation requires the secrets/get permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param secret_name: The name of the secret.
    :type secret_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DeletedSecretBundle, or the result of cls(response)
    :rtype: ~azure.keyvault.v7_3_preview.models.DeletedSecretBundle
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DeletedSecretBundle"]
    # Well-known failures become typed exceptions; overridable via ``error_map``.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    api_version = "7.3-preview"
    accept = "application/json"

    # Fill the operation's URL template with the serialized path arguments.
    path_args = {
        'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
        'secret-name': self._serialize.url("secret_name", secret_name, 'str'),
    }
    url = self._client.format_url(self.get_deleted_secret.metadata['url'], **path_args)  # type: ignore

    # Query string and headers.
    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.get(url, query, headers)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
        raise HttpResponseError(response=response, model=error)

    deserialized = self._deserialize('DeletedSecretBundle', pipeline_response)
    return cls(pipeline_response, deserialized, {}) if cls else deserialized
get_deleted_secret.metadata = {'url': '/deletedsecrets/{secret-name}'}  # type: ignore
async def purge_deleted_secret(
    self,
    vault_base_url: str,
    secret_name: str,
    **kwargs: Any
) -> None:
    """Permanently deletes the specified secret.

    The purge deleted secret operation removes the secret permanently, without the possibility of
    recovery. This operation can only be enabled on a soft-delete enabled vault. This operation
    requires the secrets/purge permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param secret_name: The name of the secret.
    :type secret_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Well-known failures become typed exceptions; overridable via ``error_map``.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    api_version = "7.3-preview"
    accept = "application/json"

    # Fill the operation's URL template with the serialized path arguments.
    path_args = {
        'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
        'secret-name': self._serialize.url("secret_name", secret_name, 'str'),
    }
    url = self._client.format_url(self.purge_deleted_secret.metadata['url'], **path_args)  # type: ignore

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.delete(url, query, headers)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # A successful purge returns 204 No Content.
    if response.status_code != 204:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
        raise HttpResponseError(response=response, model=error)

    if cls:
        return cls(pipeline_response, None, {})
purge_deleted_secret.metadata = {'url': '/deletedsecrets/{secret-name}'}  # type: ignore
async def recover_deleted_secret(
    self,
    vault_base_url: str,
    secret_name: str,
    **kwargs: Any
) -> "_models.SecretBundle":
    """Recovers the deleted secret to the latest version.

    Recovers the deleted secret in the specified vault. This operation can only be performed on a
    soft-delete enabled vault. This operation requires the secrets/recover permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param secret_name: The name of the deleted secret.
    :type secret_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: SecretBundle, or the result of cls(response)
    :rtype: ~azure.keyvault.v7_3_preview.models.SecretBundle
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecretBundle"]
    # Well-known failures become typed exceptions; overridable via ``error_map``.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    api_version = "7.3-preview"
    accept = "application/json"

    # Fill the operation's URL template with the serialized path arguments.
    path_args = {
        'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
        'secret-name': self._serialize.url("secret_name", secret_name, 'str'),
    }
    url = self._client.format_url(self.recover_deleted_secret.metadata['url'], **path_args)  # type: ignore

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    # Recovery is a POST with no body.
    request = self._client.post(url, query, headers)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
        raise HttpResponseError(response=response, model=error)

    deserialized = self._deserialize('SecretBundle', pipeline_response)
    return cls(pipeline_response, deserialized, {}) if cls else deserialized
recover_deleted_secret.metadata = {'url': '/deletedsecrets/{secret-name}/recover'}  # type: ignore
async def backup_secret(
    self,
    vault_base_url: str,
    secret_name: str,
    **kwargs: Any
) -> "_models.BackupSecretResult":
    """Backs up the specified secret.

    Requests that a backup of the specified secret be downloaded to the client. All versions of the
    secret will be downloaded. This operation requires the secrets/backup permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param secret_name: The name of the secret.
    :type secret_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: BackupSecretResult, or the result of cls(response)
    :rtype: ~azure.keyvault.v7_3_preview.models.BackupSecretResult
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.BackupSecretResult"]
    # Well-known failures become typed exceptions; overridable via ``error_map``.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    api_version = "7.3-preview"
    accept = "application/json"

    # Fill the operation's URL template with the serialized path arguments.
    path_args = {
        'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
        'secret-name': self._serialize.url("secret_name", secret_name, 'str'),
    }
    url = self._client.format_url(self.backup_secret.metadata['url'], **path_args)  # type: ignore

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    # Backup is a POST with no body.
    request = self._client.post(url, query, headers)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
        raise HttpResponseError(response=response, model=error)

    deserialized = self._deserialize('BackupSecretResult', pipeline_response)
    return cls(pipeline_response, deserialized, {}) if cls else deserialized
backup_secret.metadata = {'url': '/secrets/{secret-name}/backup'}  # type: ignore
async def restore_secret(
    self,
    vault_base_url: str,
    parameters: "_models.SecretRestoreParameters",
    **kwargs: Any
) -> "_models.SecretBundle":
    """Restores a backed up secret to a vault.

    Restores a backed up secret, and all its versions, to a vault. This operation requires the
    secrets/restore permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param parameters: The parameters to restore the secret.
    :type parameters: ~azure.keyvault.v7_3_preview.models.SecretRestoreParameters
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: SecretBundle, or the result of cls(response)
    :rtype: ~azure.keyvault.v7_3_preview.models.SecretBundle
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecretBundle"]
    # Well-known failures become typed exceptions; overridable via ``error_map``.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    api_version = "7.3-preview"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Fill the operation's URL template with the serialized path arguments.
    path_args = {
        'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
    }
    url = self._client.format_url(self.restore_secret.metadata['url'], **path_args)  # type: ignore

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    # Serialize the request body and POST it.
    body_content = self._serialize.body(parameters, 'SecretRestoreParameters')
    request = self._client.post(url, query, headers, content=body_content)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
        raise HttpResponseError(response=response, model=error)

    deserialized = self._deserialize('SecretBundle', pipeline_response)
    return cls(pipeline_response, deserialized, {}) if cls else deserialized
restore_secret.metadata = {'url': '/secrets/restore'}  # type: ignore
| 47.936842
| 133
| 0.657468
| 4,585
| 40,986
| 5.67241
| 0.060414
| 0.022839
| 0.030452
| 0.022493
| 0.879691
| 0.865118
| 0.851469
| 0.848508
| 0.843279
| 0.835666
| 0
| 0.006895
| 0.242766
| 40,986
| 854
| 134
| 47.992974
| 0.831099
| 0.124384
| 0
| 0.818004
| 0
| 0
| 0.104696
| 0.02111
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011742
| false
| 0
| 0.013699
| 0
| 0.084149
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fc900ffa743c0daa98dd98d94e834fa52c62bf9e
| 24,052
|
py
|
Python
|
inputs_test.py
|
zhonglihanzhu/tensorflow-objectDetection
|
aa3d1b754d5c78b8401ce86d4c20f45741fc2b77
|
[
"Apache-2.0"
] | 1
|
2019-04-24T16:32:11.000Z
|
2019-04-24T16:32:11.000Z
|
inputs_test.py
|
zhonglihanzhu/tensorflow-objectDetection
|
aa3d1b754d5c78b8401ce86d4c20f45741fc2b77
|
[
"Apache-2.0"
] | null | null | null |
inputs_test.py
|
zhonglihanzhu/tensorflow-objectDetection
|
aa3d1b754d5c78b8401ce86d4c20f45741fc2b77
|
[
"Apache-2.0"
] | 3
|
2018-04-04T07:26:54.000Z
|
2020-08-14T01:43:29.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.tflearn.inputs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import numpy as np
import tensorflow as tf
import inputs
from core import preprocessor
from core import standard_fields as fields
from utils import config_util
FLAGS = tf.flags.FLAGS
def _get_configs_for_model(model_name):
  """Returns pipeline configurations for `model_name`, pointed at test data."""
  # TODO: Make sure these tests work fine outside google3.
  prefix = 'google3/third_party/tensorflow_models/'
  pipeline_path = os.path.join(
      FLAGS.test_srcdir,
      prefix + 'object_detection/samples/configs/' + model_name + '.config')
  label_map = os.path.join(
      FLAGS.test_srcdir,
      prefix + 'object_detection/data/pet_label_map.pbtxt')
  records = os.path.join(
      FLAGS.test_srcdir,
      prefix + 'object_detection/test_data/pets_examples.record')
  # The same record file serves as both the train and eval input.
  configs = config_util.get_configs_from_pipeline_file(pipeline_path)
  return config_util.merge_external_params_with_configs(
      configs,
      train_input_path=records,
      eval_input_path=records,
      label_map_path=label_map)
class InputsTest(tf.test.TestCase):
def test_faster_rcnn_resnet50_train_input(self):
"""Tests the training input function for FasterRcnnResnet50."""
configs = _get_configs_for_model('faster_rcnn_resnet50_pets')
configs['train_config'].unpad_groundtruth_tensors = True
model_config = configs['model']
model_config.faster_rcnn.num_classes = 37
train_input_fn = inputs.create_train_input_fn(
configs['train_config'], configs['train_input_config'], model_config)
features, labels = train_input_fn()
self.assertAllEqual([None, None, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual([],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[None, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[None, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[None],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
def test_faster_rcnn_resnet50_eval_input(self):
"""Tests the eval input function for FasterRcnnResnet50."""
configs = _get_configs_for_model('faster_rcnn_resnet50_pets')
model_config = configs['model']
model_config.faster_rcnn.num_classes = 37
eval_input_fn = inputs.create_eval_input_fn(
configs['eval_config'], configs['eval_input_config'], model_config)
features, labels = eval_input_fn()
self.assertAllEqual([1, None, None, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual(
[1, None, None, 3],
features[fields.InputDataFields.original_image].shape.as_list())
self.assertEqual(tf.uint8,
features[fields.InputDataFields.original_image].dtype)
self.assertAllEqual([1], features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[1, None, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[1, None, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[1, None],
labels[fields.InputDataFields.groundtruth_area].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_area].dtype)
self.assertAllEqual(
[1, None],
labels[fields.InputDataFields.groundtruth_is_crowd].shape.as_list())
self.assertEqual(
tf.bool, labels[fields.InputDataFields.groundtruth_is_crowd].dtype)
self.assertAllEqual(
[1, None],
labels[fields.InputDataFields.groundtruth_difficult].shape.as_list())
self.assertEqual(
tf.int32, labels[fields.InputDataFields.groundtruth_difficult].dtype)
def test_ssd_inceptionV2_train_input(self):
"""Tests the training input function for SSDInceptionV2."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
model_config = configs['model']
model_config.ssd.num_classes = 37
batch_size = configs['train_config'].batch_size
train_input_fn = inputs.create_train_input_fn(
configs['train_config'], configs['train_input_config'], model_config)
features, labels = train_input_fn()
self.assertAllEqual([batch_size, 300, 300, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual([batch_size],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[batch_size],
labels[fields.InputDataFields.num_groundtruth_boxes].shape.as_list())
self.assertEqual(tf.int32,
labels[fields.InputDataFields.num_groundtruth_boxes].dtype)
self.assertAllEqual(
[batch_size, 50, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[batch_size, 50, model_config.ssd.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[batch_size, 50],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
def test_ssd_inceptionV2_eval_input(self):
"""Tests the eval input function for SSDInceptionV2."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
model_config = configs['model']
model_config.ssd.num_classes = 37
eval_input_fn = inputs.create_eval_input_fn(
configs['eval_config'], configs['eval_input_config'], model_config)
features, labels = eval_input_fn()
self.assertAllEqual([1, 300, 300, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual(
[1, None, None, 3],
features[fields.InputDataFields.original_image].shape.as_list())
self.assertEqual(tf.uint8,
features[fields.InputDataFields.original_image].dtype)
self.assertAllEqual([1], features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[1, None, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[1, None, model_config.ssd.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[1, None],
labels[fields.InputDataFields.groundtruth_area].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_area].dtype)
self.assertAllEqual(
[1, None],
labels[fields.InputDataFields.groundtruth_is_crowd].shape.as_list())
self.assertEqual(
tf.bool, labels[fields.InputDataFields.groundtruth_is_crowd].dtype)
self.assertAllEqual(
[1, None],
labels[fields.InputDataFields.groundtruth_difficult].shape.as_list())
self.assertEqual(
tf.int32, labels[fields.InputDataFields.groundtruth_difficult].dtype)
def test_predict_input(self):
"""Tests the predict input function."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
predict_input_fn = inputs.create_predict_input_fn(
model_config=configs['model'])
serving_input_receiver = predict_input_fn()
image = serving_input_receiver.features[fields.InputDataFields.image]
receiver_tensors = serving_input_receiver.receiver_tensors[
inputs.SERVING_FED_EXAMPLE_KEY]
self.assertEqual([1, 300, 300, 3], image.shape.as_list())
self.assertEqual(tf.float32, image.dtype)
self.assertEqual(tf.string, receiver_tensors.dtype)
def test_error_with_bad_train_config(self):
"""Tests that a TypeError is raised with improper train config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
train_input_fn = inputs.create_train_input_fn(
train_config=configs['eval_config'], # Expecting `TrainConfig`.
train_input_config=configs['train_input_config'],
model_config=configs['model'])
with self.assertRaises(TypeError):
train_input_fn()
def test_error_with_bad_train_input_config(self):
"""Tests that a TypeError is raised with improper train input config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
train_input_fn = inputs.create_train_input_fn(
train_config=configs['train_config'],
train_input_config=configs['model'], # Expecting `InputReader`.
model_config=configs['model'])
with self.assertRaises(TypeError):
train_input_fn()
def test_error_with_bad_train_model_config(self):
"""Tests that a TypeError is raised with improper train model config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
train_input_fn = inputs.create_train_input_fn(
train_config=configs['train_config'],
train_input_config=configs['train_input_config'],
model_config=configs['train_config']) # Expecting `DetectionModel`.
with self.assertRaises(TypeError):
train_input_fn()
def test_error_with_bad_eval_config(self):
"""Tests that a TypeError is raised with improper eval config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
eval_input_fn = inputs.create_eval_input_fn(
eval_config=configs['train_config'], # Expecting `EvalConfig`.
eval_input_config=configs['eval_input_config'],
model_config=configs['model'])
with self.assertRaises(TypeError):
eval_input_fn()
def test_error_with_bad_eval_input_config(self):
"""Tests that a TypeError is raised with improper eval input config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
eval_input_fn = inputs.create_eval_input_fn(
eval_config=configs['eval_config'],
eval_input_config=configs['model'], # Expecting `InputReader`.
model_config=configs['model'])
with self.assertRaises(TypeError):
eval_input_fn()
def test_error_with_bad_eval_model_config(self):
  """A TypeError is raised when an eval config is passed as the model config."""
  configs = _get_configs_for_model('ssd_inception_v2_pets')
  configs['model'].ssd.num_classes = 37
  # Deliberately hand the eval config where a `DetectionModel` config belongs.
  input_fn = inputs.create_eval_input_fn(
      eval_config=configs['eval_config'],
      eval_input_config=configs['eval_input_config'],
      model_config=configs['eval_config'])
  self.assertRaises(TypeError, input_fn)
class DataAugmentationFnTest(tf.test.TestCase):
  """Tests for the augmentation fn built from inputs.augment_input_data."""

  def test_apply_image_and_box_augmentation(self):
    """Resize + pixel-coordinate scaling touches both image and boxes."""
    augmentation_steps = [
        (preprocessor.resize_image, {
            'new_height': 20,
            'new_width': 20,
            'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR
        }),
        (preprocessor.scale_boxes_to_pixel_coordinates, {}),
    ]
    augment_fn = functools.partial(
        inputs.augment_input_data,
        data_augmentation_options=augmentation_steps)
    features = {
        fields.InputDataFields.image:
            tf.constant(np.random.rand(10, 10, 3).astype(np.float32)),
        fields.InputDataFields.groundtruth_boxes:
            tf.constant(np.array([[.5, .5, 1., 1.]], np.float32))
    }
    augmented = augment_fn(tensor_dict=features)
    with self.test_session() as session:
      augmented_out = session.run(augmented)
    self.assertAllEqual(
        augmented_out[fields.InputDataFields.image].shape, [20, 20, 3])
    self.assertAllClose(
        augmented_out[fields.InputDataFields.groundtruth_boxes],
        [[10, 10, 20, 20]])

  def test_include_masks_in_data_augmentation(self):
    """Instance masks ride along through the augmentation pipeline."""
    augmentation_steps = [
        (preprocessor.resize_image, {
            'new_height': 20,
            'new_width': 20,
            'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR
        })
    ]
    augment_fn = functools.partial(
        inputs.augment_input_data,
        data_augmentation_options=augmentation_steps)
    features = {
        fields.InputDataFields.image:
            tf.constant(np.random.rand(10, 10, 3).astype(np.float32)),
        fields.InputDataFields.groundtruth_instance_masks:
            tf.constant(np.zeros([2, 10, 10], np.uint8))
    }
    augmented = augment_fn(tensor_dict=features)
    with self.test_session() as session:
      augmented_out = session.run(augmented)
    self.assertAllEqual(
        augmented_out[fields.InputDataFields.image].shape, [20, 20, 3])
    self.assertAllEqual(
        augmented_out[fields.InputDataFields.groundtruth_instance_masks].shape,
        [2, 20, 20])

  def test_include_keypoints_in_data_augmentation(self):
    """Keypoints are scaled along with boxes to pixel coordinates."""
    augmentation_steps = [
        (preprocessor.resize_image, {
            'new_height': 20,
            'new_width': 20,
            'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR
        }),
        (preprocessor.scale_boxes_to_pixel_coordinates, {}),
    ]
    augment_fn = functools.partial(
        inputs.augment_input_data,
        data_augmentation_options=augmentation_steps)
    features = {
        fields.InputDataFields.image:
            tf.constant(np.random.rand(10, 10, 3).astype(np.float32)),
        fields.InputDataFields.groundtruth_boxes:
            tf.constant(np.array([[.5, .5, 1., 1.]], np.float32)),
        fields.InputDataFields.groundtruth_keypoints:
            tf.constant(np.array([[[0.5, 1.0], [0.5, 0.5]]], np.float32))
    }
    augmented = augment_fn(tensor_dict=features)
    with self.test_session() as session:
      augmented_out = session.run(augmented)
    self.assertAllEqual(
        augmented_out[fields.InputDataFields.image].shape, [20, 20, 3])
    self.assertAllClose(
        augmented_out[fields.InputDataFields.groundtruth_boxes],
        [[10, 10, 20, 20]])
    self.assertAllClose(
        augmented_out[fields.InputDataFields.groundtruth_keypoints],
        [[[10, 20], [10, 10]]])
def _fake_model_preprocessor_fn(image):
  """Pass-through preprocess fn; also emits tf.shape(image)[1:] with a leading dim."""
  reported_shape = tf.expand_dims(tf.shape(image)[1:], axis=0)
  return image, reported_shape
def _fake_image_resizer_fn(image, mask):
  """Pass-through resizer: returns inputs unchanged plus the image's shape."""
  return image, mask, tf.shape(image)
class DataTransformationFnTest(tf.test.TestCase):
  """Tests for inputs.transform_input_data (graph-mode, TF1 sessions)."""

  def test_returns_correct_class_label_encodings(self):
    """Integer class ids are converted to one-hot vectors of num_classes."""
    tensor_dict = {
        fields.InputDataFields.image:
            tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),
        fields.InputDataFields.groundtruth_boxes:
            tf.constant(np.array([[0, 0, 1, 1], [.5, .5, 1, 1]], np.float32)),
        fields.InputDataFields.groundtruth_classes:
            tf.constant(np.array([3, 1], np.int32))
    }
    num_classes = 3
    input_transformation_fn = functools.partial(
        inputs.transform_input_data,
        model_preprocess_fn=_fake_model_preprocessor_fn,
        image_resizer_fn=_fake_image_resizer_fn,
        num_classes=num_classes)
    with self.test_session() as sess:
      transformed_inputs = sess.run(
          input_transformation_fn(tensor_dict=tensor_dict))
    # Labels appear 1-indexed here: class 3 -> last slot, class 1 -> first.
    self.assertAllClose(
        transformed_inputs[fields.InputDataFields.groundtruth_classes],
        [[0, 0, 1], [1, 0, 0]])

  def test_returns_correct_merged_boxes(self):
    """Identical boxes are merged and their class encodings are unioned."""
    tensor_dict = {
        fields.InputDataFields.image:
            tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),
        fields.InputDataFields.groundtruth_boxes:
            tf.constant(np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]], np.float32)),
        fields.InputDataFields.groundtruth_classes:
            tf.constant(np.array([3, 1], np.int32))
    }
    num_classes = 3
    input_transformation_fn = functools.partial(
        inputs.transform_input_data,
        model_preprocess_fn=_fake_model_preprocessor_fn,
        image_resizer_fn=_fake_image_resizer_fn,
        num_classes=num_classes,
        merge_multiple_boxes=True)
    with self.test_session() as sess:
      transformed_inputs = sess.run(
          input_transformation_fn(tensor_dict=tensor_dict))
    self.assertAllClose(
        transformed_inputs[fields.InputDataFields.groundtruth_boxes],
        [[.5, .5, 1., 1.]])
    self.assertAllClose(
        transformed_inputs[fields.InputDataFields.groundtruth_classes],
        [[1, 0, 1]])

  def test_returns_resized_masks(self):
    """Instance masks are resized by the supplied image resizer fn."""
    tensor_dict = {
        fields.InputDataFields.image:
            tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),
        fields.InputDataFields.groundtruth_instance_masks:
            tf.constant(np.random.rand(2, 4, 4).astype(np.float32)),
        fields.InputDataFields.groundtruth_classes:
            tf.constant(np.array([3, 1], np.int32))
    }

    def fake_image_resizer_fn(image, masks):
      # Resize to 8x8; masks are transposed to HWC for resize, then back.
      resized_image = tf.image.resize_images(image, [8, 8])
      resized_masks = tf.transpose(
          tf.image.resize_images(tf.transpose(masks, [1, 2, 0]), [8, 8]),
          [2, 0, 1])
      return resized_image, resized_masks, tf.shape(resized_image)

    num_classes = 3
    input_transformation_fn = functools.partial(
        inputs.transform_input_data,
        model_preprocess_fn=_fake_model_preprocessor_fn,
        image_resizer_fn=fake_image_resizer_fn,
        num_classes=num_classes)
    with self.test_session() as sess:
      transformed_inputs = sess.run(
          input_transformation_fn(tensor_dict=tensor_dict))
    self.assertAllEqual(
        transformed_inputs[fields.InputDataFields.groundtruth_instance_masks].shape,
        [2, 8, 8])

  def test_applies_model_preprocess_fn_to_image_tensor(self):
    """The model's preprocess fn is applied and true_image_shape is recorded."""
    np_image = np.random.randint(256, size=(4, 4, 3))
    tensor_dict = {
        fields.InputDataFields.image:
            tf.constant(np_image),
        fields.InputDataFields.groundtruth_classes:
            tf.constant(np.array([3, 1], np.int32))
    }

    def fake_model_preprocessor_fn(image):
      # Scale to [0, 1] and report the shape, mimicking a real preprocessor.
      return (image / 255., tf.expand_dims(tf.shape(image)[1:], axis=0))

    num_classes = 3
    input_transformation_fn = functools.partial(
        inputs.transform_input_data,
        model_preprocess_fn=fake_model_preprocessor_fn,
        image_resizer_fn=_fake_image_resizer_fn,
        num_classes=num_classes)
    with self.test_session() as sess:
      transformed_inputs = sess.run(
          input_transformation_fn(tensor_dict=tensor_dict))
    self.assertAllClose(transformed_inputs[fields.InputDataFields.image],
                        np_image / 255.)
    self.assertAllClose(
        transformed_inputs[fields.InputDataFields.true_image_shape],
        [4, 4, 3])

  def test_applies_data_augmentation_fn_to_tensor_dict(self):
    """The provided data augmentation fn is applied to every tensor."""
    np_image = np.random.randint(256, size=(4, 4, 3))
    tensor_dict = {
        fields.InputDataFields.image:
            tf.constant(np_image),
        fields.InputDataFields.groundtruth_classes:
            tf.constant(np.array([3, 1], np.int32))
    }

    def add_one_data_augmentation_fn(tensor_dict):
      return {key: value + 1 for key, value in tensor_dict.items()}

    num_classes = 4
    input_transformation_fn = functools.partial(
        inputs.transform_input_data,
        model_preprocess_fn=_fake_model_preprocessor_fn,
        image_resizer_fn=_fake_image_resizer_fn,
        num_classes=num_classes,
        data_augmentation_fn=add_one_data_augmentation_fn)
    with self.test_session() as sess:
      augmented_tensor_dict = sess.run(
          input_transformation_fn(tensor_dict=tensor_dict))
    self.assertAllEqual(augmented_tensor_dict[fields.InputDataFields.image],
                        np_image + 1)
    # Classes were incremented by the augmentation fn before one-hot encoding.
    self.assertAllEqual(
        augmented_tensor_dict[fields.InputDataFields.groundtruth_classes],
        [[0, 0, 0, 1], [0, 1, 0, 0]])

  def test_applies_data_augmentation_fn_before_model_preprocess_fn(self):
    """Augmentation runs first, then the model preprocess fn."""
    np_image = np.random.randint(256, size=(4, 4, 3))
    tensor_dict = {
        fields.InputDataFields.image:
            tf.constant(np_image),
        fields.InputDataFields.groundtruth_classes:
            tf.constant(np.array([3, 1], np.int32))
    }

    def mul_two_model_preprocessor_fn(image):
      return (image * 2, tf.expand_dims(tf.shape(image)[1:], axis=0))

    def add_five_to_image_data_augmentation_fn(tensor_dict):
      tensor_dict[fields.InputDataFields.image] += 5
      return tensor_dict

    num_classes = 4
    input_transformation_fn = functools.partial(
        inputs.transform_input_data,
        model_preprocess_fn=mul_two_model_preprocessor_fn,
        image_resizer_fn=_fake_image_resizer_fn,
        num_classes=num_classes,
        data_augmentation_fn=add_five_to_image_data_augmentation_fn)
    with self.test_session() as sess:
      augmented_tensor_dict = sess.run(
          input_transformation_fn(tensor_dict=tensor_dict))
    # (x + 5) * 2 proves +5 (augmentation) happened before *2 (preprocess).
    self.assertAllEqual(augmented_tensor_dict[fields.InputDataFields.image],
                        (np_image + 5) * 2)
# Run all test cases in this module under TensorFlow's test runner.
if __name__ == '__main__':
  tf.test.main()
| 42.122592
| 80
| 0.695992
| 2,884
| 24,052
| 5.495146
| 0.09466
| 0.113958
| 0.109036
| 0.076729
| 0.85872
| 0.835815
| 0.81657
| 0.79764
| 0.776439
| 0.758329
| 0
| 0.021064
| 0.196657
| 24,052
| 570
| 81
| 42.196491
| 0.799141
| 0.036172
| 0
| 0.678351
| 0
| 0
| 0.042491
| 0.021179
| 0
| 0
| 0
| 0.001754
| 0.162887
| 0
| null | null | 0
| 0.02268
| null | null | 0.002062
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5d900a3d6c9cd23696fe0d2b7c20d8ff5142b9e5
| 68,117
|
py
|
Python
|
tools/wptrunner/wptrunner/tests/test_update.py
|
hoojaoh/web-platform-tests
|
3503c50a6452e153bde906a9c6644cb6237224fc
|
[
"BSD-3-Clause"
] | 8
|
2019-04-09T21:13:05.000Z
|
2021-11-23T17:25:18.000Z
|
tools/wptrunner/wptrunner/tests/test_update.py
|
hoojaoh/web-platform-tests
|
3503c50a6452e153bde906a9c6644cb6237224fc
|
[
"BSD-3-Clause"
] | 7
|
2019-07-08T22:23:16.000Z
|
2021-03-18T23:42:32.000Z
|
tools/wptrunner/wptrunner/tests/test_update.py
|
AYCHKnow/WebPlatformTest
|
ccf316907ed4451ad0c752b686abb6ab75c54618
|
[
"BSD-3-Clause"
] | 11
|
2019-04-12T01:20:16.000Z
|
2021-11-23T17:25:02.000Z
|
import json
import mock
import os
import pytest
import sys
from io import BytesIO
from .. import metadata, manifestupdate
from ..update.update import WPTUpdate
from ..update.base import StepRunner, Step
from mozlog import structuredlog, handlers, formatters
here = os.path.dirname(__file__)
# Make the repository root importable so the top-level `manifest` package
# resolves; must run before the import below.
sys.path.insert(0, os.path.join(here, os.pardir, os.pardir, os.pardir))

from manifest import manifest, item as manifest_item
def rel_path_to_test_url(rel_path):
    """Convert an OS-specific relative path to a forward-slash URL path."""
    assert not os.path.isabs(rel_path)
    return "/".join(rel_path.split(os.sep))
def SourceFileWithTest(path, hash, cls, *args):
    """Build a mock SourceFile exposing one manifest item of type *cls*."""
    source = mock.Mock(rel_path=path, hash=hash)
    item = cls("/foobar", path, "/", rel_path_to_test_url(path), *args)
    source.manifest_items = mock.Mock(return_value=(cls.item_type, [item]))
    return source
# Map manifest item-type names to their manifest item classes.
item_classes = {"testharness": manifest_item.TestharnessTest,
                "reftest": manifest_item.RefTest,
                "reftest_node": manifest_item.RefTestNode,
                "manual": manifest_item.ManualTest,
                "wdspec": manifest_item.WebDriverSpecTest,
                "conformancechecker": manifest_item.ConformanceCheckerTest,
                "visual": manifest_item.VisualTest,
                "support": manifest_item.SupportFile}

# Baseline run_info used by every suite log; tests override individual keys.
default_run_info = {"debug": False, "os": "linux", "version": "18.04", "processor": "x86_64", "bits": 64}

test_id = "/path/to/test.htm"  # Canonical test URL used by most tests below.
dir_id = "path/to/__dir__"  # Directory-level metadata id.
def reset_globals():
    """Clear the module-level intern caches in `metadata` between tests."""
    for cache in (metadata.prop_intern,
                  metadata.run_info_intern,
                  metadata.status_intern):
        cache.clear()
def get_run_info(overrides):
    """Return a copy of default_run_info with *overrides* applied on top."""
    merged = dict(default_run_info)
    merged.update(overrides)
    return merged
def update(tests, *logs, **kwargs):
    """Run a metadata update for *tests* against one or more raw *logs*.

    Each element of *tests* is (test_path, test_ids, test_type, manifest_str).
    Keyword flags: full_update, disable_intermittent, update_intermittent,
    remove_intermittent (all default False).  Returns the list produced by
    metadata.update_results.
    """
    full_update = kwargs.pop("full_update", False)
    disable_intermittent = kwargs.pop("disable_intermittent", False)
    update_intermittent = kwargs.pop("update_intermittent", False)
    remove_intermittent = kwargs.pop("remove_intermittent", False)
    # Any remaining kwargs indicate a typo in the caller.
    assert not kwargs

    id_test_map, updater = create_updater(tests)

    for log in logs:
        log = create_log(log)
        updater.update_from_log(log)

    # NOTE(review): "processor" maps to the bare string "bits" while "os"
    # maps to a list — looks inconsistent; confirm against the
    # update_properties consumer before changing.
    update_properties = (["debug", "os", "version", "processor"],
                         {"os": ["version"], "processor": "bits"})

    # Monkeypatch metadata.load_expected so update_results reads the
    # in-memory manifests compiled below instead of touching the filesystem.
    expected_data = {}
    metadata.load_expected = lambda _, __, test_path, *args: expected_data.get(test_path)

    for test_path, test_ids, test_type, manifest_str in tests:
        expected_data[test_path] = manifestupdate.compile(BytesIO(manifest_str),
                                                          test_path,
                                                          "/",
                                                          update_properties,
                                                          update_intermittent,
                                                          remove_intermittent)

    return list(metadata.update_results(id_test_map,
                                        update_properties,
                                        full_update,
                                        disable_intermittent,
                                        update_intermittent,
                                        remove_intermittent))
def create_updater(tests, url_base="/", **kwargs):
    """Build (id_test_map, ExpectedUpdater) for the given test descriptions.

    *tests* has the shape accepted by create_test_manifest; extra kwargs are
    forwarded to metadata.ExpectedUpdater.
    """
    # (Removed a dead `id_test_map = {}` assignment that was immediately
    # overwritten by create_test_tree's return value.)
    m = create_test_manifest(tests, url_base)
    reset_globals()
    id_test_map = metadata.create_test_tree(None, m)
    return id_test_map, metadata.ExpectedUpdater(id_test_map, **kwargs)
def create_log(entries):
    """Serialize log *entries* into a rewound BytesIO.

    A list is treated as (action, kwargs) pairs replayed through a mozlog
    StructuredLogger with a JSON formatter; anything else is dumped as a
    single JSON document.
    """
    data = BytesIO()
    if isinstance(entries, list):
        logger = structuredlog.StructuredLogger("expected_test")
        handler = handlers.StreamHandler(data, formatters.JSONFormatter())
        logger.add_handler(handler)
        for item in entries:
            action, kwargs = item
            getattr(logger, action)(**kwargs)
        logger.remove_handler(handler)
    else:
        # json.dump(obj, BytesIO) raises TypeError on Python 3 because json
        # emits str; encode explicitly — identical bytes under Python 2.
        data.write(json.dumps(entries).encode("utf-8"))
    data.seek(0)
    return data
def suite_log(entries, run_info=None):
    """Bracket *entries* with suite_start/suite_end log records."""
    info = default_run_info.copy()
    if run_info:
        info.update(run_info)
    prologue = [("suite_start", {"tests": [], "run_info": info})]
    epilogue = [("suite_end", {})]
    return prologue + entries + epilogue
def create_test_manifest(tests, url_base="/"):
    """Build a manifest.Manifest from (path, ids, type, manifest_str) tuples."""
    source_files = []
    for index, (test, _, test_type, _) in enumerate(tests):
        if not test_type:
            continue
        fake_hash = str(index) * 40
        source_files.append(
            (SourceFileWithTest(test, fake_hash, item_classes[test_type]), True))
    m = manifest.Manifest()
    m.update(source_files)
    return m
@pytest.mark.xfail(sys.version[0] == "3",
                   reason="metadata doesn't support py3")
def test_update_0():
    """An expectation that now passes is removed, leaving an empty manifest."""
    tests = [("path/to/test.htm", [test_id], "testharness",
              """[test.htm]
  [test1]
    expected: FAIL""")]

    log = suite_log([("test_start", {"test": "/path/to/test.htm"}),
                     ("test_status", {"test": "/path/to/test.htm",
                                      "subtest": "test1",
                                      "status": "PASS",
                                      "expected": "FAIL"}),
                     ("test_end", {"test": "/path/to/test.htm",
                                   "status": "OK"})])

    updated = update(tests, log)

    assert len(updated) == 1
    assert updated[0][1].is_empty


@pytest.mark.xfail(sys.version[0] == "3",
                   reason="metadata doesn't support py3")
def test_update_1():
    """A changed failure status rewrites the expectation to the new status."""
    tests = [("path/to/test.htm", [test_id], "testharness",
              """[test.htm]
  [test1]
    expected: ERROR""")]

    log = suite_log([("test_start", {"test": test_id}),
                     ("test_status", {"test": test_id,
                                      "subtest": "test1",
                                      "status": "FAIL",
                                      "expected": "ERROR"}),
                     ("test_end", {"test": test_id,
                                   "status": "OK"})])

    updated = update(tests, log)
    new_manifest = updated[0][1]

    assert not new_manifest.is_empty
    assert new_manifest.get_test(test_id).children[0].get("expected", default_run_info) == "FAIL"
@pytest.mark.xfail(sys.version[0] == "3",
                   reason="metadata doesn't support py3")
def test_update_known_intermittent_1():
    """Mixed PASS/FAIL runs with update_intermittent produce [PASS, FAIL]."""
    tests = [("path/to/test.htm", [test_id], "testharness",
              """[test.htm]
  [test1]
    expected: PASS""")]

    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "FAIL",
                                        "expected": "PASS"}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})])
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "PASS",
                                        "expected": "PASS"}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})])
    log_2 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "PASS",
                                        "expected": "PASS"}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})])

    updated = update(tests, log_0, log_1, log_2, update_intermittent=True)
    new_manifest = updated[0][1]

    assert not new_manifest.is_empty
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", default_run_info) == ["PASS", "FAIL"]


@pytest.mark.xfail(sys.version[0] == "3",
                   reason="metadata doesn't support py3")
def test_update_known_intermittent_2():
    """A single failing run updates to plain FAIL, not an intermittent list."""
    tests = [("path/to/test.htm", [test_id], "testharness",
              """[test.htm]
  [test1]
    expected: PASS""")]

    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "FAIL",
                                        "expected": "PASS"}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})])

    updated = update(tests, log_0, update_intermittent=True)
    new_manifest = updated[0][1]

    assert not new_manifest.is_empty
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", default_run_info) == "FAIL"


@pytest.mark.xfail(sys.version[0] == "3",
                   reason="metadata doesn't support py3")
def test_update_existing_known_intermittent():
    """A new ERROR status is prepended to the existing intermittent set."""
    tests = [("path/to/test.htm", [test_id], "testharness",
              """[test.htm]
  [test1]
    expected: [PASS, FAIL]""")]

    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "ERROR",
                                        "expected": "PASS",
                                        "known_intermittent": ["FAIL"]}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})])
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "PASS",
                                        "expected": "PASS",
                                        "known_intermittent": ["FAIL"]}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})])
    log_2 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "PASS",
                                        "expected": "PASS",
                                        "known_intermittent": ["FAIL"]}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})])

    updated = update(tests, log_0, log_1, log_2, update_intermittent=True)
    new_manifest = updated[0][1]

    assert not new_manifest.is_empty
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", default_run_info) == ["PASS", "ERROR", "FAIL"]
@pytest.mark.xfail(sys.version[0] == "3",
                   reason="metadata doesn't support py3")
def test_update_remove_previous_intermittent():
    """remove_intermittent drops the stale FAIL from the intermittent set."""
    tests = [("path/to/test.htm", [test_id], "testharness",
              """[test.htm]
  [test1]
    expected: [PASS, FAIL]""")]

    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "ERROR",
                                        "expected": "PASS",
                                        "known_intermittent": ["FAIL"]}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})])
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "PASS",
                                        "expected": "PASS",
                                        "known_intermittent": ["FAIL"]}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})])
    log_2 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "PASS",
                                        "expected": "PASS",
                                        "known_intermittent": ["FAIL"]}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})])

    updated = update(tests,
                     log_0,
                     log_1,
                     log_2,
                     update_intermittent=True,
                     remove_intermittent=True)
    new_manifest = updated[0][1]

    assert not new_manifest.is_empty
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", default_run_info) == ["PASS", "ERROR"]


@pytest.mark.xfail(sys.version[0] == "3",
                   reason="metadata doesn't support py3")
def test_update_new_test_with_intermittent():
    """A test with no prior metadata gains an intermittent expectation."""
    tests = [("path/to/test.htm", [test_id], "testharness", None)]

    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "PASS",
                                        "expected": "PASS"}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})])
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "PASS",
                                        "expected": "PASS"}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})])
    log_2 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "FAIL",
                                        "expected": "PASS"}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})])

    updated = update(tests, log_0, log_1, log_2, update_intermittent=True)
    new_manifest = updated[0][1]

    assert not new_manifest.is_empty
    assert new_manifest.get_test("test.htm") is None
    assert len(new_manifest.get_test(test_id).children) == 1
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", default_run_info) == ["PASS", "FAIL"]


@pytest.mark.xfail(sys.version[0] == "3",
                   reason="metadata doesn't support py3")
def test_update_expected_tie_resolution():
    """On a 1-1 PASS/FAIL tie the list is ordered with PASS first."""
    tests = [("path/to/test.htm", [test_id], "testharness", None)]

    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "PASS",
                                        "expected": "PASS"}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})])
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "FAIL",
                                        "expected": "PASS"}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})])

    updated = update(tests, log_0, log_1, update_intermittent=True)
    new_manifest = updated[0][1]

    assert not new_manifest.is_empty
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", default_run_info) == ["PASS", "FAIL"]
@pytest.mark.xfail(sys.version[0] == "3",
                   reason="metadata doesn't support py3")
def test_update_reorder_expected():
    """The intermittent list is reordered so the most frequent status leads."""
    tests = [("path/to/test.htm", [test_id], "testharness",
              """[test.htm]
  [test1]
    expected: [PASS, FAIL]""")]

    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "FAIL",
                                        "expected": "PASS",
                                        "known_intermittent": ["FAIL"]}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})])
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "FAIL",
                                        "expected": "PASS",
                                        "known_intermittent": ["FAIL"]}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})])
    log_2 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "PASS",
                                        "expected": "PASS",
                                        "known_intermittent": ["FAIL"]}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})])

    updated = update(tests, log_0, log_1, log_2, update_intermittent=True)
    new_manifest = updated[0][1]

    assert not new_manifest.is_empty
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", default_run_info) == ["FAIL", "PASS"]


@pytest.mark.xfail(sys.version[0] == "3",
                   reason="metadata doesn't support py3")
def test_update_and_preserve_unchanged_expected_intermittent():
    """Updating one condition branch leaves the other branch's list intact."""
    tests = [("path/to/test.htm", [test_id], "testharness", """
[test.htm]
  expected:
    if os == "android": [PASS, FAIL]
    FAIL""")]

    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_end", {"test": test_id,
                                     "status": "FAIL",
                                     "expected": "PASS",
                                     "known_intermittent": ["FAIL"]})],
                      run_info={"os": "android"})
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_end", {"test": test_id,
                                     "status": "PASS",
                                     "expected": "PASS",
                                     "known_intermittent": ["FAIL"]})],
                      run_info={"os": "android"})
    log_2 = suite_log([("test_start", {"test": test_id}),
                       ("test_end", {"test": test_id,
                                     "status": "PASS",
                                     "expected": "FAIL"})])

    updated = update(tests, log_0, log_1, log_2)
    new_manifest = updated[0][1]

    assert not new_manifest.is_empty
    run_info_1 = default_run_info.copy()
    run_info_1.update({"os": "android"})

    assert not new_manifest.is_empty
    assert new_manifest.get_test(test_id).get(
        "expected", run_info_1) == ["PASS", "FAIL"]
    assert new_manifest.get_test(test_id).get(
        "expected", default_run_info) == "PASS"
@pytest.mark.xfail(sys.version[0] == "3",
                   reason="metadata doesn't support py3")
def test_update_test_with_intermittent_to_one_expected_status():
    """Without update_intermittent, the list collapses to the new status."""
    tests = [("path/to/test.htm", [test_id], "testharness",
              """[test.htm]
  [test1]
    expected: [PASS, FAIL]""")]

    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "ERROR",
                                        "expected": "PASS",
                                        "known_intermittent": ["FAIL"]}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})])

    updated = update(tests, log_0)
    new_manifest = updated[0][1]

    assert not new_manifest.is_empty
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", default_run_info) == "ERROR"


@pytest.mark.xfail(sys.version[0] == "3",
                   reason="metadata doesn't support py3")
def test_update_intermittent_with_conditions():
    """A conditional intermittent expectation grows the new TIMEOUT status."""
    tests = [("path/to/test.htm", [test_id], "testharness", """
[test.htm]
  expected:
    if os == "android": [PASS, FAIL]""")]

    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_end", {"test": test_id,
                                     "status": "TIMEOUT",
                                     "expected": "PASS",
                                     "known_intermittent": ["FAIL"]})],
                      run_info={"os": "android"})
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_end", {"test": test_id,
                                     "status": "PASS",
                                     "expected": "PASS",
                                     "known_intermittent": ["FAIL"]})],
                      run_info={"os": "android"})

    updated = update(tests, log_0, log_1, update_intermittent=True)
    new_manifest = updated[0][1]

    assert not new_manifest.is_empty
    run_info_1 = default_run_info.copy()
    run_info_1.update({"os": "android"})

    assert not new_manifest.is_empty
    assert new_manifest.get_test(test_id).get(
        "expected", run_info_1) == ["PASS", "TIMEOUT", "FAIL"]


@pytest.mark.xfail(sys.version[0] == "3",
                   reason="metadata doesn't support py3")
def test_update_and_remove_intermittent_with_conditions():
    """remove_intermittent drops the unobserved FAIL from the condition."""
    tests = [("path/to/test.htm", [test_id], "testharness", """
[test.htm]
  expected:
    if os == "android": [PASS, FAIL]""")]

    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_end", {"test": test_id,
                                     "status": "TIMEOUT",
                                     "expected": "PASS",
                                     "known_intermittent": ["FAIL"]})],
                      run_info={"os": "android"})
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_end", {"test": test_id,
                                     "status": "PASS",
                                     "expected": "PASS",
                                     "known_intermittent": ["FAIL"]})],
                      run_info={"os": "android"})

    updated = update(tests, log_0, log_1, update_intermittent=True, remove_intermittent=True)
    new_manifest = updated[0][1]

    assert not new_manifest.is_empty
    run_info_1 = default_run_info.copy()
    run_info_1.update({"os": "android"})

    assert not new_manifest.is_empty
    assert new_manifest.get_test(test_id).get(
        "expected", run_info_1) == ["PASS", "TIMEOUT"]
@pytest.mark.xfail(sys.version[0] == "3",
                   reason="metadata doesn't support py3")
def test_update_intermittent_full():
    """full_update + update_intermittent preserves each branch's statuses."""
    tests = [("path/to/test.htm", [test_id], "testharness",
              """[test.htm]
  [test1]
    expected:
      if os == "mac": [FAIL, TIMEOUT]
      FAIL""")]

    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "FAIL",
                                        "expected": "FAIL",
                                        "known_intermittent": ["TIMEOUT"]}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})],
                      run_info={"os": "mac"})
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "FAIL"}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})])

    updated = update(tests, log_0, log_1, update_intermittent=True, full_update=True)
    new_manifest = updated[0][1]

    assert not new_manifest.is_empty
    run_info_1 = default_run_info.copy()
    run_info_1.update({"os": "mac"})

    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_1) == ["FAIL", "TIMEOUT"]
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", default_run_info) == "FAIL"


@pytest.mark.xfail(sys.version[0] == "3",
                   reason="metadata doesn't support py3")
def test_update_intermittent_full_remove():
    """full_update + remove_intermittent prunes the unobserved PASS."""
    tests = [("path/to/test.htm", [test_id], "testharness",
              """[test.htm]
  [test1]
    expected:
      if os == "mac": [FAIL, TIMEOUT, PASS]
      FAIL""")]

    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "FAIL",
                                        "expected": "FAIL",
                                        "known_intermittent": ["TIMEOUT", "PASS"]}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})],
                      run_info={"os": "mac"})
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "TIMEOUT",
                                        "expected": "FAIL",
                                        "known_intermittent": ["TIMEOUT", "PASS"]}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})],
                      run_info={"os": "mac"})
    log_2 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "FAIL"}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})])

    updated = update(tests, log_0, log_1, log_2, update_intermittent=True,
                     full_update=True, remove_intermittent=True)
    new_manifest = updated[0][1]

    assert not new_manifest.is_empty
    run_info_1 = default_run_info.copy()
    run_info_1.update({"os": "mac"})

    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_1) == ["FAIL", "TIMEOUT"]
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", default_run_info) == "FAIL"
@pytest.mark.xfail(sys.version[0] == "3",
                   reason="metadata doesn't support py3")
def test_full_update():
    """Plain full_update collapses an unconfirmed intermittent to one status."""
    tests = [("path/to/test.htm", [test_id], "testharness",
              """[test.htm]
  [test1]
    expected:
      if os == "mac": [FAIL, TIMEOUT]
      FAIL""")]

    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "FAIL",
                                        "expected": "FAIL",
                                        "known_intermittent": ["TIMEOUT"]}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})],
                      run_info={"os": "mac"})
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "FAIL"}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})])

    updated = update(tests, log_0, log_1, full_update=True)
    new_manifest = updated[0][1]

    assert not new_manifest.is_empty
    run_info_1 = default_run_info.copy()
    run_info_1.update({"os": "mac"})

    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_1) == "FAIL"
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", default_run_info) == "FAIL"


@pytest.mark.xfail(sys.version[0] == "3",
                   reason="metadata doesn't support py3")
def test_full_orphan():
    """full_update deletes subtests and nested nodes absent from the log."""
    tests = [("path/to/test.htm", [test_id], "testharness",
              """[test.htm]
  [test1]
    expected: FAIL
    [subsub test]
      expected: TIMEOUT
  [test2]
    expected: ERROR
""")]

    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "FAIL",
                                        "expected": "FAIL"}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})])

    updated = update(tests, log_0, full_update=True)
    new_manifest = updated[0][1]

    assert not new_manifest.is_empty
    assert len(new_manifest.get_test(test_id).children[0].children) == 0
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", default_run_info) == "FAIL"
    assert len(new_manifest.get_test(test_id).children) == 1
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_reorder_expected_full_conditions():
tests = [("path/to/test.htm", [test_id], "testharness",
"""[test.htm]
[test1]
expected:
if os == "mac": [FAIL, TIMEOUT]
[FAIL, PASS]""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "TIMEOUT",
"expected": "FAIL",
"known_intermittent": ["TIMEOUT"]}),
("test_end", {"test": test_id,
"status": "OK"})],
run_info={"os": "mac"})
log_1 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "TIMEOUT",
"expected": "FAIL",
"known_intermittent": ["TIMEOUT"]}),
("test_end", {"test": test_id,
"status": "OK"})],
run_info={"os": "mac"})
log_2 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "PASS",
"expected": "FAIL",
"known_intermittent": ["PASS"]}),
("test_end", {"test": test_id,
"status": "OK"})])
log_3 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "PASS",
"expected": "FAIL",
"known_intermittent": ["PASS"]}),
("test_end", {"test": test_id,
"status": "OK"})])
updated = update(tests, log_0, log_1, log_2, log_3, update_intermittent=True, full_update=True)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
run_info_1 = default_run_info.copy()
run_info_1.update({"os": "mac"})
assert new_manifest.get_test(test_id).children[0].get(
"expected", run_info_1) == ["TIMEOUT", "FAIL"]
assert new_manifest.get_test(test_id).children[0].get(
"expected", default_run_info) == ["PASS", "FAIL"]
@pytest.mark.xfail(sys.version_info[0] == 3,
                   reason="metadata doesn't support py3")
def test_skip_0():
    """A result matching the existing expectation produces no update."""
    tests = [("path/to/test.htm", [test_id], "testharness",
              """[test.htm]
  [test1]
    expected: FAIL""")]
    log = suite_log([("test_start", {"test": test_id}),
                     ("test_status", {"test": test_id,
                                      "subtest": "test1",
                                      "status": "FAIL",
                                      "expected": "FAIL"}),
                     ("test_end", {"test": test_id,
                                   "status": "OK"})])
    updated = update(tests, log)
    assert not updated
@pytest.mark.xfail(sys.version_info[0] == 3,
                   reason="metadata doesn't support py3")
def test_new_subtest():
    """A subtest absent from the metadata gets an expectation added,
    without disturbing the existing subtest's expectation."""
    tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
  [test1]
    expected: FAIL""")]
    log = suite_log([("test_start", {"test": test_id}),
                     ("test_status", {"test": test_id,
                                      "subtest": "test1",
                                      "status": "FAIL",
                                      "expected": "FAIL"}),
                     ("test_status", {"test": test_id,
                                      "subtest": "test2",
                                      "status": "FAIL",
                                      "expected": "PASS"}),
                     ("test_end", {"test": test_id,
                                   "status": "OK"})])
    updated = update(tests, log)
    new_manifest = updated[0][1]
    assert not new_manifest.is_empty
    assert new_manifest.get_test(test_id).children[0].get("expected", default_run_info) == "FAIL"
    assert new_manifest.get_test(test_id).children[1].get("expected", default_run_info) == "FAIL"
@pytest.mark.xfail(sys.version_info[0] == 3,
                   reason="metadata doesn't support py3")
def test_update_multiple_0():
    """Runs with different run_info produce per-condition expectations."""
    tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
  [test1]
    expected: FAIL""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "FAIL",
                                        "expected": "FAIL"}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})],
                      run_info={"debug": False, "os": "osx"})
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "TIMEOUT",
                                        "expected": "FAIL"}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})],
                      run_info={"debug": False, "os": "linux"})
    updated = update(tests, log_0, log_1)
    new_manifest = updated[0][1]
    assert not new_manifest.is_empty
    run_info_1 = default_run_info.copy()
    run_info_1.update({"debug": False, "os": "osx"})
    run_info_2 = default_run_info.copy()
    run_info_2.update({"debug": False, "os": "linux"})
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_1) == "FAIL"
    # Fix: use run_info_2 (built above but previously unused) instead of a
    # raw dict missing the default run_info keys.
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_2) == "TIMEOUT"
@pytest.mark.xfail(sys.version_info[0] == 3,
                   reason="metadata doesn't support py3")
def test_update_multiple_1():
    """Conditions are added per-OS; an unseen OS keeps the original value."""
    tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
  [test1]
    expected: FAIL""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "FAIL",
                                        "expected": "FAIL"}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})],
                      run_info={"os": "osx"})
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "TIMEOUT",
                                        "expected": "FAIL"}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})],
                      run_info={"os": "linux"})
    updated = update(tests, log_0, log_1)
    new_manifest = updated[0][1]
    assert not new_manifest.is_empty
    run_info_1 = default_run_info.copy()
    run_info_1.update({"os": "osx"})
    run_info_2 = default_run_info.copy()
    run_info_2.update({"os": "linux"})
    # "win" was never run, so it falls back to the pre-existing FAIL.
    run_info_3 = default_run_info.copy()
    run_info_3.update({"os": "win"})
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_1) == "FAIL"
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_2) == "TIMEOUT"
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_3) == "FAIL"
@pytest.mark.xfail(sys.version_info[0] == 3,
                   reason="metadata doesn't support py3")
def test_update_multiple_2():
    """Runs differing only in the debug flag split into debug conditions."""
    tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
  [test1]
    expected: FAIL""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "FAIL",
                                        "expected": "FAIL"}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})],
                      run_info={"debug": False, "os": "osx"})
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "TIMEOUT",
                                        "expected": "FAIL"}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})],
                      run_info={"debug": True, "os": "osx"})
    updated = update(tests, log_0, log_1)
    new_manifest = updated[0][1]
    run_info_1 = default_run_info.copy()
    run_info_1.update({"debug": False, "os": "osx"})
    run_info_2 = default_run_info.copy()
    run_info_2.update({"debug": True, "os": "osx"})
    assert not new_manifest.is_empty
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_1) == "FAIL"
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_2) == "TIMEOUT"
@pytest.mark.xfail(sys.version_info[0] == 3,
                   reason="metadata doesn't support py3")
def test_update_multiple_3():
    """Existing conditional expectations are updated in place per condition."""
    tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
  [test1]
    expected:
      if debug: FAIL
      if not debug and os == "osx": TIMEOUT""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "FAIL",
                                        "expected": "FAIL"}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})],
                      run_info={"debug": False, "os": "osx"})
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "TIMEOUT",
                                        "expected": "FAIL"}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})],
                      run_info={"debug": True, "os": "osx"})
    updated = update(tests, log_0, log_1)
    new_manifest = updated[0][1]
    run_info_1 = default_run_info.copy()
    run_info_1.update({"debug": False, "os": "osx"})
    run_info_2 = default_run_info.copy()
    run_info_2.update({"debug": True, "os": "osx"})
    assert not new_manifest.is_empty
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_1) == "FAIL"
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_2) == "TIMEOUT"
@pytest.mark.xfail(sys.version_info[0] == 3,
                   reason="metadata doesn't support py3")
def test_update_ignore_existing():
    """Conditions for configurations not exercised by any run are preserved.
    NOTE(review): despite the name, update() is not called with
    ignore_existing=True here — confirm against the updater's default."""
    tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
  [test1]
    expected:
      if debug: TIMEOUT
      if not debug and os == "osx": NOTRUN""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "FAIL",
                                        "expected": "PASS"}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})],
                      run_info={"debug": False, "os": "linux"})
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "FAIL",
                                        "expected": "PASS"}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})],
                      run_info={"debug": True, "os": "windows"})
    updated = update(tests, log_0, log_1)
    new_manifest = updated[0][1]
    run_info_1 = default_run_info.copy()
    run_info_1.update({"debug": False, "os": "linux"})
    run_info_2 = default_run_info.copy()
    run_info_2.update({"debug": False, "os": "osx"})
    assert not new_manifest.is_empty
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_1) == "FAIL"
    # The untested osx/non-debug condition keeps its original NOTRUN.
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_2) == "NOTRUN"
@pytest.mark.xfail(sys.version_info[0] == 3,
                   reason="metadata doesn't support py3")
def test_update_new_test():
    """A test with no existing metadata (None) gets a fresh manifest entry."""
    tests = [("path/to/test.htm", [test_id], "testharness", None)]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "FAIL",
                                        "expected": "PASS"}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})])
    updated = update(tests, log_0)
    new_manifest = updated[0][1]
    run_info_1 = default_run_info.copy()
    assert not new_manifest.is_empty
    # Entries are keyed by full test id, not the bare filename.
    assert new_manifest.get_test("test.htm") is None
    assert len(new_manifest.get_test(test_id).children) == 1
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_1) == "FAIL"
@pytest.mark.xfail(sys.version_info[0] == 3,
                   reason="metadata doesn't support py3")
def test_update_duplicate():
    """Conflicting results from identical run_info leave the expectation alone."""
    tests = [("path/to/test.htm", [test_id], "testharness", """
[test.htm]
  expected: ERROR""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_end", {"test": test_id,
                                     "status": "PASS"})])
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_end", {"test": test_id,
                                     "status": "FAIL"})])
    updated = update(tests, log_0, log_1)
    new_manifest = updated[0][1]
    run_info_1 = default_run_info.copy()
    assert new_manifest.get_test(test_id).get(
        "expected", run_info_1) == "ERROR"
@pytest.mark.xfail(sys.version_info[0] == 3,
                   reason="metadata doesn't support py3")
def test_update_disable_intermittent():
    """disable_intermittent marks an unstable test disabled with the message."""
    tests = [("path/to/test.htm", [test_id], "testharness", """
[test.htm]
  expected: ERROR""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_end", {"test": test_id,
                                     "status": "PASS"})])
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_end", {"test": test_id,
                                     "status": "FAIL"})])
    updated = update(tests, log_0, log_1, disable_intermittent="Some message")
    new_manifest = updated[0][1]
    run_info_1 = default_run_info.copy()
    assert new_manifest.get_test(test_id).get(
        "disabled", run_info_1) == "Some message"
@pytest.mark.xfail(sys.version_info[0] == 3,
                   reason="metadata doesn't support py3")
def test_update_stability_conditional_instability():
    """Only the configuration that was actually unstable gets disabled;
    a stable configuration gets a normal expectation update instead."""
    tests = [("path/to/test.htm", [test_id], "testharness", """
[test.htm]
  expected: ERROR""")]
    # linux flips between PASS and FAIL -> unstable on linux.
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_end", {"test": test_id,
                                     "status": "PASS"})],
                      run_info={"os": "linux"})
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_end", {"test": test_id,
                                     "status": "FAIL"})],
                      run_info={"os": "linux"})
    # mac consistently FAILs -> stable on mac.
    log_2 = suite_log([("test_start", {"test": test_id}),
                       ("test_end", {"test": test_id,
                                     "status": "FAIL"})],
                      run_info={"os": "mac"})
    updated = update(tests, log_0, log_1, log_2,
                     disable_intermittent="Some message")
    new_manifest = updated[0][1]
    run_info_1 = default_run_info.copy()
    run_info_1.update({"os": "linux"})
    run_info_2 = default_run_info.copy()
    run_info_2.update({"os": "mac"})
    assert new_manifest.get_test(test_id).get(
        "disabled", run_info_1) == "Some message"
    with pytest.raises(KeyError):
        assert new_manifest.get_test(test_id).get(
            "disabled", run_info_2)
    assert new_manifest.get_test(test_id).get(
        "expected", run_info_2) == "FAIL"
@pytest.mark.xfail(sys.version_info[0] == 3,
                   reason="metadata doesn't support py3")
def test_update_full():
    """full_update drops stale entries (unrun subtests and tests) and
    rebuilds the conditions from only the supplied runs."""
    tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
  [test1]
    expected:
      if debug: TIMEOUT
      if not debug and os == "osx": NOTRUN
  [test2]
    expected: FAIL
[test.js]
  [test1]
    expected: FAIL
""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "FAIL",
                                        "expected": "PASS"}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})],
                      run_info={"debug": False})
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "ERROR",
                                        "expected": "PASS"}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})],
                      run_info={"debug": True})
    updated = update(tests, log_0, log_1, full_update=True)
    new_manifest = updated[0][1]
    run_info_1 = default_run_info.copy()
    run_info_1.update({"debug": False, "os": "win"})
    run_info_2 = default_run_info.copy()
    run_info_2.update({"debug": True, "os": "osx"})
    assert not new_manifest.is_empty
    # test.js never ran, so the full update removes it entirely.
    assert new_manifest.get_test("test.js") is None
    # test2 never ran either, so only test1 survives.
    assert len(new_manifest.get_test(test_id).children) == 1
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_1) == "FAIL"
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_2) == "ERROR"
@pytest.mark.xfail(sys.version_info[0] == 3,
                   reason="metadata doesn't support py3")
def test_update_full_unknown():
    """full_update keeps conditions on properties (release_or_beta) whose
    other values were never exercised by the supplied runs."""
    tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
  [test1]
    expected:
      if release_or_beta: ERROR
      if not debug and os == "osx": NOTRUN
""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "FAIL",
                                        "expected": "PASS"}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})],
                      run_info={"debug": False, "release_or_beta": False})
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "FAIL",
                                        "expected": "PASS"}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})],
                      run_info={"debug": True, "release_or_beta": False})
    updated = update(tests, log_0, log_1, full_update=True)
    new_manifest = updated[0][1]
    run_info_1 = default_run_info.copy()
    run_info_1.update({"release_or_beta": False})
    run_info_2 = default_run_info.copy()
    run_info_2.update({"release_or_beta": True})
    assert not new_manifest.is_empty
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_1) == "FAIL"
    # release_or_beta=True was never run, so its ERROR condition survives.
    assert new_manifest.get_test(test_id).children[0].get(
        "expected", run_info_2) == "ERROR"
@pytest.mark.xfail(sys.version_info[0] == 3,
                   reason="metadata doesn't support py3")
def test_update_default():
    """When every condition and the default resolve to PASS, the whole
    expectation (and thus the manifest) is removed."""
    tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
  [test1]
    expected:
      if os == "mac": FAIL
      ERROR""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "PASS",
                                        "expected": "FAIL"}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})],
                      run_info={"os": "mac"})
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_status", {"test": test_id,
                                        "subtest": "test1",
                                        "status": "PASS",
                                        "expected": "ERROR"}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})],
                      run_info={"os": "linux"})
    updated = update(tests, log_0, log_1)
    new_manifest = updated[0][1]
    assert new_manifest.is_empty
@pytest.mark.xfail(sys.version_info[0] == 3,
                   reason="metadata doesn't support py3")
def test_update_default_1():
    """Updating the default branch leaves a distinct condition intact."""
    tests = [("path/to/test.htm", [test_id], "testharness", """
[test.htm]
  expected:
    if os == "mac": TIMEOUT
    ERROR""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_end", {"test": test_id,
                                     "expected": "ERROR",
                                     "status": "FAIL"})],
                      run_info={"os": "linux"})
    updated = update(tests, log_0)
    new_manifest = updated[0][1]
    # (A duplicate is_empty assertion was removed here.)
    assert not new_manifest.is_empty
    run_info_1 = default_run_info.copy()
    run_info_1.update({"os": "mac"})
    run_info_2 = default_run_info.copy()
    run_info_2.update({"os": "win"})
    assert new_manifest.get_test(test_id).get(
        "expected", run_info_1) == "TIMEOUT"
    assert new_manifest.get_test(test_id).get(
        "expected", run_info_2) == "FAIL"
@pytest.mark.xfail(sys.version_info[0] == 3,
                   reason="metadata doesn't support py3")
def test_update_default_2():
    """If the new default equals an existing condition's value, the
    condition is folded into a single default expectation."""
    tests = [("path/to/test.htm", [test_id], "testharness", """
[test.htm]
  expected:
    if os == "mac": TIMEOUT
    ERROR""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_end", {"test": test_id,
                                     "expected": "ERROR",
                                     "status": "TIMEOUT"})],
                      run_info={"os": "linux"})
    updated = update(tests, log_0)
    new_manifest = updated[0][1]
    # (A duplicate is_empty assertion was removed here.)
    assert not new_manifest.is_empty
    run_info_1 = default_run_info.copy()
    run_info_1.update({"os": "mac"})
    run_info_2 = default_run_info.copy()
    run_info_2.update({"os": "win"})
    assert new_manifest.get_test(test_id).get(
        "expected", run_info_1) == "TIMEOUT"
    assert new_manifest.get_test(test_id).get(
        "expected", run_info_2) == "TIMEOUT"
@pytest.mark.xfail(sys.version_info[0] == 3,
                   reason="metadata doesn't support py3")
def test_update_assertion_count_0():
    """An observed count above max-asserts raises the maximum (count + 1)."""
    tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
  max-asserts: 4
  min-asserts: 2
""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("assertion_count", {"test": test_id,
                                            "count": 6,
                                            "min_expected": 2,
                                            "max_expected": 4}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})])
    updated = update(tests, log_0)
    new_manifest = updated[0][1]
    assert not new_manifest.is_empty
    assert new_manifest.get_test(test_id).get("max-asserts") == "7"
    assert new_manifest.get_test(test_id).get("min-asserts") == "2"
@pytest.mark.xfail(sys.version_info[0] == 3,
                   reason="metadata doesn't support py3")
def test_update_assertion_count_1():
    """An observed count below min-asserts removes the minimum entirely."""
    tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
  max-asserts: 4
  min-asserts: 2
""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("assertion_count", {"test": test_id,
                                            "count": 1,
                                            "min_expected": 2,
                                            "max_expected": 4}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})])
    updated = update(tests, log_0)
    new_manifest = updated[0][1]
    assert not new_manifest.is_empty
    assert new_manifest.get_test(test_id).get("max-asserts") == "4"
    assert new_manifest.get_test(test_id).has_key("min-asserts") is False
@pytest.mark.xfail(sys.version_info[0] == 3,
                   reason="metadata doesn't support py3")
def test_update_assertion_count_2():
    """A count within the existing [min, max] bounds yields no update."""
    tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
  max-asserts: 4
  min-asserts: 2
""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("assertion_count", {"test": test_id,
                                            "count": 3,
                                            "min_expected": 2,
                                            "max_expected": 4}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})])
    updated = update(tests, log_0)
    assert not updated
@pytest.mark.xfail(sys.version_info[0] == 3,
                   reason="metadata doesn't support py3")
def test_update_assertion_count_3():
    """With multiple runs, max-asserts grows to the largest count + 1."""
    tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
  max-asserts: 4
  min-asserts: 2
""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("assertion_count", {"test": test_id,
                                            "count": 6,
                                            "min_expected": 2,
                                            "max_expected": 4}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})],
                      run_info={"os": "windows"})
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("assertion_count", {"test": test_id,
                                            "count": 7,
                                            "min_expected": 2,
                                            "max_expected": 4}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})],
                      run_info={"os": "linux"})
    updated = update(tests, log_0, log_1)
    new_manifest = updated[0][1]
    assert not new_manifest.is_empty
    assert new_manifest.get_test(test_id).get("max-asserts") == "8"
    assert new_manifest.get_test(test_id).get("min-asserts") == "2"
@pytest.mark.xfail(sys.version_info[0] == 3,
                   reason="metadata doesn't support py3")
def test_update_assertion_count_4():
    """With no prior bounds (expected 0), only max-asserts is written."""
    tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("assertion_count", {"test": test_id,
                                            "count": 6,
                                            "min_expected": 0,
                                            "max_expected": 0}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})],
                      run_info={"os": "windows"})
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("assertion_count", {"test": test_id,
                                            "count": 7,
                                            "min_expected": 0,
                                            "max_expected": 0}),
                       ("test_end", {"test": test_id,
                                     "status": "OK"})],
                      run_info={"os": "linux"})
    updated = update(tests, log_0, log_1)
    new_manifest = updated[0][1]
    assert not new_manifest.is_empty
    assert new_manifest.get_test(test_id).get("max-asserts") == "8"
    assert new_manifest.get_test(test_id).has_key("min-asserts") is False
@pytest.mark.xfail(sys.version_info[0] == 3,
                   reason="metadata doesn't support py3")
def test_update_lsan_0():
    """A leak records its top frame in the directory's lsan-allowed list."""
    tests = [("path/to/test.htm", [test_id], "testharness", ""),
             ("path/to/__dir__", [dir_id], None, "")]
    log_0 = suite_log([("lsan_leak", {"scope": "path/to/",
                                      "frames": ["foo", "bar"]})])
    updated = update(tests, log_0)
    new_manifest = updated[0][1]
    assert not new_manifest.is_empty
    assert new_manifest.get("lsan-allowed") == ["foo"]
@pytest.mark.xfail(sys.version_info[0] == 3,
                   reason="metadata doesn't support py3")
def test_update_lsan_1():
    """New leak frames are merged (sorted) with the existing allow-list."""
    tests = [("path/to/test.htm", [test_id], "testharness", ""),
             ("path/to/__dir__", [dir_id], None, """
lsan-allowed: [foo]""")]
    log_0 = suite_log([("lsan_leak", {"scope": "path/to/",
                                      "frames": ["foo", "bar"]}),
                       ("lsan_leak", {"scope": "path/to/",
                                      "frames": ["baz", "foobar"]})])
    updated = update(tests, log_0)
    new_manifest = updated[0][1]
    assert not new_manifest.is_empty
    assert new_manifest.get("lsan-allowed") == ["baz", "foo"]
@pytest.mark.xfail(sys.version_info[0] == 3,
                   reason="metadata doesn't support py3")
def test_update_lsan_2():
    """A leak already matched by a parent directory's allow-list is not
    re-added; only the unmatched leak's frame is recorded."""
    tests = [("path/to/test.htm", [test_id], "testharness", ""),
             ("path/__dir__", ["path/__dir__"], None, """
lsan-allowed: [foo]"""),
             ("path/to/__dir__", [dir_id], None, "")]
    log_0 = suite_log([("lsan_leak", {"scope": "path/to/",
                                      "frames": ["foo", "bar"],
                                      "allowed_match": ["foo"]}),
                       ("lsan_leak", {"scope": "path/to/",
                                      "frames": ["baz", "foobar"]})])
    updated = update(tests, log_0)
    new_manifest = updated[0][1]
    assert not new_manifest.is_empty
    assert new_manifest.get("lsan-allowed") == ["baz"]
@pytest.mark.xfail(sys.version_info[0] == 3,
                   reason="metadata doesn't support py3")
def test_update_lsan_3():
    """Leaks from different platforms are merged into one allow-list."""
    tests = [("path/to/test.htm", [test_id], "testharness", ""),
             ("path/to/__dir__", [dir_id], None, "")]
    log_0 = suite_log([("lsan_leak", {"scope": "path/to/",
                                      "frames": ["foo", "bar"]})],
                      run_info={"os": "win"})
    log_1 = suite_log([("lsan_leak", {"scope": "path/to/",
                                      "frames": ["baz", "foobar"]})],
                      run_info={"os": "linux"})
    updated = update(tests, log_0, log_1)
    new_manifest = updated[0][1]
    assert not new_manifest.is_empty
    assert new_manifest.get("lsan-allowed") == ["baz", "foo"]
@pytest.mark.xfail(sys.version_info[0] == 3,
                   reason="metadata doesn't support py3")
def test_update_wptreport_0():
    """A wptreport-style dict log works like a raw log; a PASS result
    clears the FAIL expectation, leaving an empty manifest."""
    tests = [("path/to/test.htm", [test_id], "testharness",
              """[test.htm]
  [test1]
    expected: FAIL""")]
    log = {"run_info": default_run_info.copy(),
           "results": [
               {"test": "/path/to/test.htm",
                "subtests": [{"name": "test1",
                              "status": "PASS",
                              "expected": "FAIL"}],
                "status": "OK"}]}
    updated = update(tests, log)
    assert len(updated) == 1
    assert updated[0][1].is_empty
@pytest.mark.xfail(sys.version_info[0] == 3,
                   reason="metadata doesn't support py3")
def test_update_wptreport_1():
    """lsan_leaks entries in a wptreport log update lsan-allowed."""
    tests = [("path/to/test.htm", [test_id], "testharness", ""),
             ("path/to/__dir__", [dir_id], None, "")]
    log = {"run_info": default_run_info.copy(),
           "results": [],
           "lsan_leaks": [{"scope": "path/to/",
                           "frames": ["baz", "foobar"]}]}
    updated = update(tests, log)
    assert len(updated) == 1
    assert updated[0][1].get("lsan-allowed") == ["baz"]
@pytest.mark.xfail(sys.version_info[0] == 3,
                   reason="metadata doesn't support py3")
def test_update_leak_total_0():
    """A mozleak total above threshold writes a rounded leak-threshold."""
    tests = [("path/to/test.htm", [test_id], "testharness", ""),
             ("path/to/__dir__", [dir_id], None, "")]
    log_0 = suite_log([("mozleak_total", {"scope": "path/to/",
                                          "process": "default",
                                          "bytes": 100,
                                          "threshold": 0,
                                          "objects": []})])
    updated = update(tests, log_0)
    new_manifest = updated[0][1]
    assert not new_manifest.is_empty
    assert new_manifest.get("leak-threshold") == ['default:51200']
@pytest.mark.xfail(sys.version_info[0] == 3,
                   reason="metadata doesn't support py3")
def test_update_leak_total_1():
    """A leak total under the run's threshold produces no update."""
    tests = [("path/to/test.htm", [test_id], "testharness", ""),
             ("path/to/__dir__", [dir_id], None, "")]
    log_0 = suite_log([("mozleak_total", {"scope": "path/to/",
                                          "process": "default",
                                          "bytes": 100,
                                          "threshold": 1000,
                                          "objects": []})])
    updated = update(tests, log_0)
    assert not updated
@pytest.mark.xfail(sys.version_info[0] == 3,
                   reason="metadata doesn't support py3")
def test_update_leak_total_2():
    """A leak total within the pre-configured leak-total yields no update."""
    tests = [("path/to/test.htm", [test_id], "testharness", ""),
             ("path/to/__dir__", [dir_id], None, """
leak-total: 110""")]
    log_0 = suite_log([("mozleak_total", {"scope": "path/to/",
                                          "process": "default",
                                          "bytes": 100,
                                          "threshold": 110,
                                          "objects": []})])
    updated = update(tests, log_0)
    assert not updated
@pytest.mark.xfail(sys.version_info[0] == 3,
                   reason="metadata doesn't support py3")
def test_update_leak_total_3():
    """A leak total exceeding the configured leak-total raises the
    threshold to the rounded-up value."""
    tests = [("path/to/test.htm", [test_id], "testharness", ""),
             ("path/to/__dir__", [dir_id], None, """
leak-total: 100""")]
    log_0 = suite_log([("mozleak_total", {"scope": "path/to/",
                                          "process": "default",
                                          "bytes": 1000,
                                          "threshold": 100,
                                          "objects": []})])
    updated = update(tests, log_0)
    new_manifest = updated[0][1]
    assert not new_manifest.is_empty
    assert new_manifest.get("leak-threshold") == ['default:51200']
@pytest.mark.xfail(sys.version_info[0] == 3,
                   reason="metadata doesn't support py3")
def test_update_leak_total_4():
    """An lsan update alongside an in-bounds mozleak total must not
    introduce a leak-threshold key."""
    tests = [("path/to/test.htm", [test_id], "testharness", ""),
             ("path/to/__dir__", [dir_id], None, """
leak-total: 110""")]
    log_0 = suite_log([
        ("lsan_leak", {"scope": "path/to/",
                       "frames": ["foo", "bar"]}),
        ("mozleak_total", {"scope": "path/to/",
                           "process": "default",
                           "bytes": 100,
                           "threshold": 110,
                           "objects": []})])
    updated = update(tests, log_0)
    new_manifest = updated[0][1]
    assert not new_manifest.is_empty
    assert new_manifest.has_key("leak-threshold") is False
class TestStep(Step):
    """Minimal Step that stores a test manifest on the runner state,
    used to exercise state pickling in test_update_pickle."""
    def create(self, state):
        tests = [("path/to/test.htm", [test_id], "testharness", "")]
        # Attribute assignment on `state` is what gets pickled later.
        state.foo = create_test_manifest(tests)
class UpdateRunner(StepRunner):
    """StepRunner executing only TestStep, for the pickle round-trip test."""
    steps = [TestStep]
@pytest.mark.xfail(sys.version_info[0] == 3,
                   reason="update.state doesn't support py3")
def test_update_pickle():
    """WPTUpdate state survives construction and a runner pass."""
    logger = structuredlog.StructuredLogger("expected_test")
    args = {
        "test_paths": {
            "/": {"tests_path": os.path.abspath(os.path.join(here,
                                                             os.pardir,
                                                             os.pardir,
                                                             os.pardir,
                                                             os.pardir))},
        },
        "abort": False,
        "continue": False,
        "sync": False,
    }
    args2 = args.copy()
    args2["abort"] = True
    # First construction with abort=True exercises state reset...
    wptupdate = WPTUpdate(logger, **args2)
    # ...then a normal run with the single-step runner.
    wptupdate = WPTUpdate(logger, runner_cls=UpdateRunner, **args)
    wptupdate.run()
| 39.147701
| 105
| 0.466022
| 6,927
| 68,117
| 4.31702
| 0.036379
| 0.064406
| 0.089955
| 0.042636
| 0.888142
| 0.880083
| 0.871556
| 0.870586
| 0.864433
| 0.856407
| 0
| 0.017737
| 0.378408
| 68,117
| 1,739
| 106
| 39.170213
| 0.688529
| 0
| 0
| 0.797927
| 0
| 0
| 0.198433
| 0
| 0
| 0
| 0
| 0
| 0.110289
| 1
| 0.045152
| false
| 0.054774
| 0.008142
| 0
| 0.061436
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
5d9e59c1041e13d8e5628d31c10c5f268043b15b
| 39,596
|
py
|
Python
|
trustpayments/api/transaction_invoice_service_api.py
|
TrustPayments/python-sdk
|
6fde6eb8cfce270c3612a2903a845c13018c3bb9
|
[
"Apache-2.0"
] | null | null | null |
trustpayments/api/transaction_invoice_service_api.py
|
TrustPayments/python-sdk
|
6fde6eb8cfce270c3612a2903a845c13018c3bb9
|
[
"Apache-2.0"
] | null | null | null |
trustpayments/api/transaction_invoice_service_api.py
|
TrustPayments/python-sdk
|
6fde6eb8cfce270c3612a2903a845c13018c3bb9
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
from __future__ import absolute_import
import six
from trustpayments.api_client import ApiClient
class TransactionInvoiceServiceApi:
    def __init__(self, configuration):
        """Create the service API bound to a fresh ApiClient.

        :param configuration: connection/auth configuration forwarded to ApiClient.
        """
        self.api_client = ApiClient(configuration=configuration)
def count(self, space_id, **kwargs):
"""Count
Counts the number of items in the database as restricted by the given filter.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.count(space_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int space_id: (required)
:param EntityQueryFilter filter: The filter which restricts the entities which are used to calculate the count.
:return: int
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.count_with_http_info(space_id, **kwargs)
else:
(data) = self.count_with_http_info(space_id, **kwargs)
return data
    def count_with_http_info(self, space_id, **kwargs):
        """Count

        Counts the number of items in the database as restricted by the given filter.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.count_with_http_info(space_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int space_id: (required)
        :param EntityQueryFilter filter: The filter which restricts the entities which are used to calculate the count.
        :return: int
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['space_id', 'filter']
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # NOTE: locals() captures self/space_id/kwargs here; kwargs entries are
        # then validated against all_params and flattened into `params`.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method count" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'space_id' is set
        if ('space_id' not in params or
                params['space_id'] is None):
            raise ValueError("Missing the required parameter `space_id` when calling `count`")

        collection_formats = {}

        path_params = {}

        query_params = []
        if 'space_id' in params:
            query_params.append(('spaceId', params['space_id']))

        header_params = {}

        form_params = []
        local_var_files = {}

        # The optional EntityQueryFilter travels as the POST body.
        body_params = None
        if 'filter' in params:
            body_params = params['filter']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json;charset=utf-8'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(
            ['application/json;charset=utf-8'])

        # Authentication setting
        auth_settings = []

        return self.api_client.call_api(
            '/transaction-invoice/count', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='int',
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_invoice_document(self, space_id, id, **kwargs):
"""getInvoiceDocument
Returns the PDF document for the transaction invoice with given id.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_invoice_document(space_id, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int space_id: (required)
:param int id: The id of the transaction invoice to get the document for. (required)
:return: RenderedDocument
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_invoice_document_with_http_info(space_id, id, **kwargs)
else:
(data) = self.get_invoice_document_with_http_info(space_id, id, **kwargs)
return data
    def get_invoice_document_with_http_info(self, space_id, id, **kwargs):
        """getInvoiceDocument

        Returns the PDF document for the transaction invoice with given id.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_invoice_document_with_http_info(space_id, id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int space_id: (required)
        :param int id: The id of the transaction invoice to get the document for. (required)
        :return: RenderedDocument
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['space_id', 'id']
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # NOTE: locals() captures self/space_id/id/kwargs here; kwargs entries
        # are validated against all_params and flattened into `params`.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_invoice_document" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'space_id' is set
        if ('space_id' not in params or
                params['space_id'] is None):
            raise ValueError("Missing the required parameter `space_id` when calling `get_invoice_document`")
        # verify the required parameter 'id' is set
        if ('id' not in params or
                params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `get_invoice_document`")

        collection_formats = {}

        path_params = {}

        # Both ids are sent as query-string parameters.
        query_params = []
        if 'space_id' in params:
            query_params.append(('spaceId', params['space_id']))
        if 'id' in params:
            query_params.append(('id', params['id']))

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json;charset=utf-8'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(
            ['*/*'])

        # Authentication setting
        auth_settings = []

        return self.api_client.call_api(
            '/transaction-invoice/getInvoiceDocument', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='RenderedDocument',
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_invoice_document_with_target_media_type(self, space_id, id, target_media_type_id, **kwargs):
    """getInvoiceDocumentWithTargetMediaType

    Returns the PDF document for the transaction invoice with given id and
    target media type id.  Synchronous by default; pass ``async_req=True``
    to receive the request thread instead of the payload.

    :param async_req bool
    :param int space_id: (required)
    :param int id: The id of the transaction invoice to get the document for. (required)
    :param int target_media_type_id: The id of the target media type for which the invoice should be generated for. (required)
    :return: RenderedDocument
             If the method is called asynchronously,
             returns the request thread.
    """
    # Convenience wrapper: always ask the low-level call for the bare
    # payload rather than the (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    return self.get_invoice_document_with_target_media_type_with_http_info(
        space_id, id, target_media_type_id, **kwargs)
def get_invoice_document_with_target_media_type_with_http_info(self, space_id, id, target_media_type_id, **kwargs):
    """getInvoiceDocumentWithTargetMediaType

    Returns the PDF document for the transaction invoice with given id and
    target media type id.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_invoice_document_with_target_media_type_with_http_info(space_id, id, target_media_type_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int space_id: (required)
    :param int id: The id of the transaction invoice to get the document for. (required)
    :param int target_media_type_id: The id of the target media type for which the invoice should be generated for. (required)
    :return: RenderedDocument
             If the method is called asynchronously,
             returns the request thread.
    """
    # Names this endpoint accepts; anything else in **kwargs is rejected
    # below with a TypeError.
    all_params = ['space_id', 'id', 'target_media_type_id']
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot of the explicit arguments (self, space_id, id,
    # target_media_type_id, kwargs); validated kwargs are merged in below.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_invoice_document_with_target_media_type" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'space_id' is set
    if ('space_id' not in params or
            params['space_id'] is None):
        raise ValueError("Missing the required parameter `space_id` when calling `get_invoice_document_with_target_media_type`")
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_invoice_document_with_target_media_type`")
    # verify the required parameter 'target_media_type_id' is set
    if ('target_media_type_id' not in params or
            params['target_media_type_id'] is None):
        raise ValueError("Missing the required parameter `target_media_type_id` when calling `get_invoice_document_with_target_media_type`")

    collection_formats = {}

    path_params = {}

    # All three identifiers travel as query-string parameters (camelCase
    # on the wire).
    query_params = []
    if 'space_id' in params:
        query_params.append(('spaceId', params['space_id']))
    if 'id' in params:
        query_params.append(('id', params['id']))
    if 'target_media_type_id' in params:
        query_params.append(('targetMediaTypeId', params['target_media_type_id']))

    header_params = {}

    form_params = []
    local_var_files = {}

    # GET request: no body.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json;charset=utf-8'])

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['*/*'])

    # Authentication setting
    auth_settings = []

    return self.api_client.call_api(
        '/transaction-invoice/getInvoiceDocumentWithTargetMediaType', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='RenderedDocument',
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def is_replacement_possible(self, space_id, id, **kwargs):
    """isReplacementPossible

    Returns whether the transaction invoice with the given id can be
    replaced.  Synchronous by default; pass ``async_req=True`` to receive
    the request thread instead of the payload.

    :param async_req bool
    :param int space_id: (required)
    :param int id: The invoice which should be checked if a replacement is possible. (required)
    :return: bool
             If the method is called asynchronously,
             returns the request thread.
    """
    # Wrapper callers always want just the payload.
    kwargs['_return_http_data_only'] = True
    return self.is_replacement_possible_with_http_info(space_id, id, **kwargs)
def is_replacement_possible_with_http_info(self, space_id, id, **kwargs):
    """isReplacementPossible

    Returns whether the transaction invoice with the given id can be
    replaced.  Synchronous by default; with ``async_req=True`` the request
    thread is returned instead.

    :param async_req bool
    :param int space_id: (required)
    :param int id: The invoice which should be checked if a replacement is possible. (required)
    :return: bool
             If the method is called asynchronously,
             returns the request thread.
    """
    # Everything this endpoint understands; unknown keywords are a
    # programming error on the caller's side.
    recognized = ('space_id', 'id', 'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout')
    params = {'space_id': space_id, 'id': id}
    for key, val in kwargs.items():
        if key not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method is_replacement_possible" % key
            )
        params[key] = val
    # Required arguments must be present and non-None.
    if params.get('space_id') is None:
        raise ValueError("Missing the required parameter `space_id` when calling `is_replacement_possible`")
    if params.get('id') is None:
        raise ValueError("Missing the required parameter `id` when calling `is_replacement_possible`")

    # Both identifiers travel on the query string (camelCase on the wire).
    query_params = []
    if 'space_id' in params:
        query_params.append(('spaceId', params['space_id']))
    if 'id' in params:
        query_params.append(('id', params['id']))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json;charset=utf-8']),
        'Content-Type': self.api_client.select_header_content_type(
            ['*/*']),
    }

    return self.api_client.call_api(
        '/transaction-invoice/isReplacementPossible', 'GET',
        {},
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='bool',
        auth_settings=[],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def mark_as_derecognized(self, space_id, id, **kwargs):
    """Mark as Derecognized

    Marks the transaction invoice with the given id as derecognized.
    Synchronous by default; pass ``async_req=True`` to receive the request
    thread instead of the payload.

    :param async_req bool
    :param int space_id: (required)
    :param int id: The id of the transaction invoice which should be marked as derecognized. (required)
    :return: TransactionInvoice
             If the method is called asynchronously,
             returns the request thread.
    """
    # Wrapper callers always want just the payload.
    kwargs['_return_http_data_only'] = True
    return self.mark_as_derecognized_with_http_info(space_id, id, **kwargs)
def mark_as_derecognized_with_http_info(self, space_id, id, **kwargs):
    """Mark as Derecognized

    Marks the transaction invoice with the given id as derecognized.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.mark_as_derecognized_with_http_info(space_id, id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int space_id: (required)
    :param int id: The id of the transaction invoice which should be marked as derecognized. (required)
    :return: TransactionInvoice
             If the method is called asynchronously,
             returns the request thread.
    """
    # Names this endpoint accepts; anything else in **kwargs triggers a
    # TypeError below.
    all_params = ['space_id', 'id']
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot of the explicit arguments; validated kwargs are merged in.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method mark_as_derecognized" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'space_id' is set
    if ('space_id' not in params or
            params['space_id'] is None):
        raise ValueError("Missing the required parameter `space_id` when calling `mark_as_derecognized`")
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `mark_as_derecognized`")

    collection_formats = {}

    path_params = {}

    # Both identifiers are sent as query-string parameters.
    query_params = []
    if 'space_id' in params:
        query_params.append(('spaceId', params['space_id']))
    if 'id' in params:
        query_params.append(('id', params['id']))

    header_params = {}

    form_params = []
    local_var_files = {}

    # State-changing POST without a request body.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json;charset=utf-8'])

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json;charset=utf-8'])

    # Authentication setting
    auth_settings = []

    return self.api_client.call_api(
        '/transaction-invoice/markAsDerecognized', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='TransactionInvoice',
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def mark_as_paid(self, space_id, id, **kwargs):
    """Mark as Paid

    Marks the transaction invoice with the given id as paid.  Synchronous
    by default; pass ``async_req=True`` to receive the request thread
    instead of the payload.

    :param async_req bool
    :param int space_id: (required)
    :param int id: The id of the transaction invoice which should be marked as paid. (required)
    :return: TransactionInvoice
             If the method is called asynchronously,
             returns the request thread.
    """
    # Wrapper callers always want just the payload.
    kwargs['_return_http_data_only'] = True
    return self.mark_as_paid_with_http_info(space_id, id, **kwargs)
def mark_as_paid_with_http_info(self, space_id, id, **kwargs):
    """Mark as Paid

    Marks the transaction invoice with the given id as paid.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.mark_as_paid_with_http_info(space_id, id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int space_id: (required)
    :param int id: The id of the transaction invoice which should be marked as paid. (required)
    :return: TransactionInvoice
             If the method is called asynchronously,
             returns the request thread.
    """
    # Names this endpoint accepts; anything else in **kwargs triggers a
    # TypeError below.
    all_params = ['space_id', 'id']
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot of the explicit arguments; validated kwargs are merged in.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method mark_as_paid" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'space_id' is set
    if ('space_id' not in params or
            params['space_id'] is None):
        raise ValueError("Missing the required parameter `space_id` when calling `mark_as_paid`")
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `mark_as_paid`")

    collection_formats = {}

    path_params = {}

    # Both identifiers are sent as query-string parameters.
    query_params = []
    if 'space_id' in params:
        query_params.append(('spaceId', params['space_id']))
    if 'id' in params:
        query_params.append(('id', params['id']))

    header_params = {}

    form_params = []
    local_var_files = {}

    # State-changing POST without a request body.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json;charset=utf-8'])

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json;charset=utf-8'])

    # Authentication setting
    auth_settings = []

    return self.api_client.call_api(
        '/transaction-invoice/markAsPaid', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='TransactionInvoice',
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def read(self, space_id, id, **kwargs):
    """Read

    Reads the entity with the given 'id' and returns it.  Synchronous by
    default; pass ``async_req=True`` to receive the request thread instead
    of the payload.

    :param async_req bool
    :param int space_id: (required)
    :param int id: The id of the transaction invoices which should be returned. (required)
    :return: TransactionInvoice
             If the method is called asynchronously,
             returns the request thread.
    """
    # Wrapper callers always want just the payload.
    kwargs['_return_http_data_only'] = True
    return self.read_with_http_info(space_id, id, **kwargs)
def read_with_http_info(self, space_id, id, **kwargs):
    """Read

    Reads the entity with the given 'id' and returns it.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_with_http_info(space_id, id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int space_id: (required)
    :param int id: The id of the transaction invoices which should be returned. (required)
    :return: TransactionInvoice
             If the method is called asynchronously,
             returns the request thread.
    """
    # Names this endpoint accepts; anything else in **kwargs triggers a
    # TypeError below.
    all_params = ['space_id', 'id']
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot of the explicit arguments; validated kwargs are merged in.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'space_id' is set
    if ('space_id' not in params or
            params['space_id'] is None):
        raise ValueError("Missing the required parameter `space_id` when calling `read`")
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `read`")

    collection_formats = {}

    path_params = {}

    # Both identifiers are sent as query-string parameters.
    query_params = []
    if 'space_id' in params:
        query_params.append(('spaceId', params['space_id']))
    if 'id' in params:
        query_params.append(('id', params['id']))

    header_params = {}

    form_params = []
    local_var_files = {}

    # GET request: no body.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json;charset=utf-8'])

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['*/*'])

    # Authentication setting
    auth_settings = []

    return self.api_client.call_api(
        '/transaction-invoice/read', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='TransactionInvoice',
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def replace(self, space_id, id, replacement, **kwargs):
    """replace

    Replaces the transaction invoice with given id with the replacement
    and returns the new transaction invoice.  Synchronous by default; pass
    ``async_req=True`` to receive the request thread instead of the
    payload.

    :param async_req bool
    :param int space_id: (required)
    :param int id: The id of the transaction invoices which should be replaced. (required)
    :param TransactionInvoiceReplacement replacement: (required)
    :return: TransactionInvoice
             If the method is called asynchronously,
             returns the request thread.
    """
    # Wrapper callers always want just the payload.
    kwargs['_return_http_data_only'] = True
    return self.replace_with_http_info(space_id, id, replacement, **kwargs)
def replace_with_http_info(self, space_id, id, replacement, **kwargs):
    """replace

    Replaces the transaction invoice with given id with the replacement
    and returns the new transaction invoice.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_with_http_info(space_id, id, replacement, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int space_id: (required)
    :param int id: The id of the transaction invoices which should be replaced. (required)
    :param TransactionInvoiceReplacement replacement: (required)
    :return: TransactionInvoice
             If the method is called asynchronously,
             returns the request thread.
    """
    # Names this endpoint accepts; anything else in **kwargs triggers a
    # TypeError below.
    all_params = ['space_id', 'id', 'replacement']
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot of the explicit arguments; validated kwargs are merged in.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'space_id' is set
    if ('space_id' not in params or
            params['space_id'] is None):
        raise ValueError("Missing the required parameter `space_id` when calling `replace`")
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `replace`")
    # verify the required parameter 'replacement' is set
    if ('replacement' not in params or
            params['replacement'] is None):
        raise ValueError("Missing the required parameter `replacement` when calling `replace`")

    collection_formats = {}

    path_params = {}

    # Identifiers go on the query string; the replacement travels as the
    # request body.
    query_params = []
    if 'space_id' in params:
        query_params.append(('spaceId', params['space_id']))
    if 'id' in params:
        query_params.append(('id', params['id']))

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'replacement' in params:
        body_params = params['replacement']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json;charset=utf-8'])

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json;charset=utf-8'])

    # Authentication setting
    auth_settings = []

    return self.api_client.call_api(
        '/transaction-invoice/replace', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='TransactionInvoice',
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def search(self, space_id, query, **kwargs):
    """Search

    Searches for the entities as specified by the given query.
    Synchronous by default; pass ``async_req=True`` to receive the request
    thread instead of the payload.

    :param async_req bool
    :param int space_id: (required)
    :param EntityQuery query: The query restricts the transaction invoices which are returned by the search. (required)
    :return: list[TransactionInvoice]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Wrapper callers always want just the payload.
    kwargs['_return_http_data_only'] = True
    return self.search_with_http_info(space_id, query, **kwargs)
def search_with_http_info(self, space_id, query, **kwargs):
    """Search

    Searches for the entities as specified by the given query.
    Synchronous by default; with ``async_req=True`` the request thread is
    returned instead.

    :param async_req bool
    :param int space_id: (required)
    :param EntityQuery query: The query restricts the transaction invoices which are returned by the search. (required)
    :return: list[TransactionInvoice]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Everything this endpoint understands; unknown keywords are a
    # programming error on the caller's side.
    recognized = ('space_id', 'query', 'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout')
    params = {'space_id': space_id, 'query': query}
    for key, val in kwargs.items():
        if key not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method search" % key
            )
        params[key] = val
    # Required arguments must be present and non-None.
    if params.get('space_id') is None:
        raise ValueError("Missing the required parameter `space_id` when calling `search`")
    if params.get('query') is None:
        raise ValueError("Missing the required parameter `query` when calling `search`")

    # The space id goes on the query string; the query object is the body.
    query_params = []
    if 'space_id' in params:
        query_params.append(('spaceId', params['space_id']))
    body_params = params['query'] if 'query' in params else None

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json;charset=utf-8']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json;charset=utf-8']),
    }

    return self.api_client.call_api(
        '/transaction-invoice/search', 'POST',
        {},
        query_params,
        header_params,
        body=body_params,
        post_params=[],
        files={},
        response_type='list[TransactionInvoice]',
        auth_settings=[],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
| 39.359841
| 144
| 0.61683
| 4,626
| 39,596
| 5.032425
| 0.037397
| 0.040593
| 0.018943
| 0.027835
| 0.967311
| 0.958591
| 0.950215
| 0.946349
| 0.935052
| 0.927448
| 0
| 0.000538
| 0.295358
| 39,596
| 1,005
| 145
| 39.399005
| 0.833841
| 0.309223
| 0
| 0.799632
| 0
| 0
| 0.207847
| 0.067382
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034926
| false
| 0
| 0.005515
| 0
| 0.091912
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5da25145f2696217a60685b923bd32771151318e
| 23,828
|
py
|
Python
|
python-client/openapi_client/api/object_api.py
|
robopsi/dfcpub
|
cdfd32e81c642bc352549b43fb26a3ac6815ee84
|
[
"MIT"
] | null | null | null |
python-client/openapi_client/api/object_api.py
|
robopsi/dfcpub
|
cdfd32e81c642bc352549b43fb26a3ac6815ee84
|
[
"MIT"
] | null | null | null |
python-client/openapi_client/api/object_api.py
|
robopsi/dfcpub
|
cdfd32e81c642bc352549b43fb26a3ac6815ee84
|
[
"MIT"
] | 1
|
2020-07-03T00:35:21.000Z
|
2020-07-03T00:35:21.000Z
|
# coding: utf-8
"""
DFC
DFC is a scalable object-storage based caching system with Amazon and Google Cloud backends. # noqa: E501
OpenAPI spec version: 1.1.0
Contact: dfc-jenkins@nvidia.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from openapi_client.api_client import ApiClient
class ObjectApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
    # Fall back to a freshly constructed ApiClient when none is injected.
    self.api_client = ApiClient() if api_client is None else api_client
def delete(self, bucket_name, object_name, **kwargs):  # noqa: E501
    """Delete object  # noqa: E501

    Deletes the named object from the given bucket.  Synchronous by
    default; when the keyword arguments contain a truthy ``async`` entry
    the request thread is returned instead.  (NOTE: ``async`` is a
    reserved word from Python 3.7 on, so callers there must supply it via
    ``**{'async': True}``.)

    :param async bool
    :param str bucket_name: Bucket name (required)
    :param str object_name: Object name (required)
    :param InputParameters input_parameters:
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always request the bare payload from the low-level call.
    kwargs['_return_http_data_only'] = True
    return self.delete_with_http_info(bucket_name, object_name, **kwargs)  # noqa: E501
def delete_with_http_info(self, bucket_name, object_name, **kwargs):  # noqa: E501
    """Delete object  # noqa: E501

    Low-level variant of ``delete``: validates the arguments and issues
    ``DELETE /objects/{bucket-name}/{object-name}``.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True (from Python 3.7 on
    ``async`` is a reserved word, so supply it as ``**{'async': True}``).

    :param async bool
    :param str bucket_name: Bucket name (required)
    :param str object_name: Object name (required)
    :param InputParameters input_parameters: optional request body
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: on an unexpected keyword argument
    :raises ValueError: when a required parameter is missing or None
    """
    all_params = ['bucket_name', 'object_name', 'input_parameters']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Snapshot of the explicit arguments; validated kwargs are merged in.
    params = locals()
    # dict.items() works on Python 2 and 3 alike; six is not needed here.
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'bucket_name' is set
    if ('bucket_name' not in params or
            params['bucket_name'] is None):
        raise ValueError("Missing the required parameter `bucket_name` when calling `delete`")  # noqa: E501
    # verify the required parameter 'object_name' is set
    if ('object_name' not in params or
            params['object_name'] is None):
        raise ValueError("Missing the required parameter `object_name` when calling `delete`")  # noqa: E501

    collection_formats = {}

    # Both names are path segments of the endpoint URL.
    path_params = {}
    if 'bucket_name' in params:
        path_params['bucket-name'] = params['bucket_name']  # noqa: E501
    if 'object_name' in params:
        path_params['object-name'] = params['object_name']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'input_parameters' in params:
        body_params = params['input_parameters']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['text/plain'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    # BUG FIX: `async` became a reserved keyword in Python 3.7, so the
    # generated `async=params.get('async')` was a SyntaxError there and
    # made this module unimportable.  Forwarding the argument through
    # dict unpacking keeps the keyword name the ApiClient expects while
    # remaining valid syntax on every supported Python version.
    return self.api_client.call_api(
        '/objects/{bucket-name}/{object-name}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        **{'async': params.get('async')})
def get(self, bucket_name, object_name, **kwargs):  # noqa: E501
    """Get object  # noqa: E501

    Fetches the named object, optionally restricted to a byte range.
    Synchronous by default; when the keyword arguments contain a truthy
    ``async`` entry the request thread is returned instead.  (NOTE:
    ``async`` is a reserved word from Python 3.7 on, so callers there must
    supply it via ``**{'async': True}``.)

    :param async bool
    :param str bucket_name: Bucket name (required)
    :param str object_name: Object name (required)
    :param int offset: Starting byte from where the read needs to be performed
    :param int length: Number of bytes that need to be returned starting from the offset
    :return: str
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always request the bare payload from the low-level call.
    kwargs['_return_http_data_only'] = True
    return self.get_with_http_info(bucket_name, object_name, **kwargs)  # noqa: E501
def get_with_http_info(self, bucket_name, object_name, **kwargs): # noqa: E501
"""Get object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_with_http_info(bucket_name, object_name, async=True)
>>> result = thread.get()
:param async bool
:param str bucket_name: Bucket name (required)
:param str object_name: Object name (required)
:param int offset: Starting byte from where the read needs to be performed
:param int length: Number of bytes that need to be returned starting from the offset
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['bucket_name', 'object_name', 'offset', 'length'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'bucket_name' is set
if ('bucket_name' not in params or
params['bucket_name'] is None):
raise ValueError("Missing the required parameter `bucket_name` when calling `get`") # noqa: E501
# verify the required parameter 'object_name' is set
if ('object_name' not in params or
params['object_name'] is None):
raise ValueError("Missing the required parameter `object_name` when calling `get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'bucket_name' in params:
path_params['bucket-name'] = params['bucket_name'] # noqa: E501
if 'object_name' in params:
path_params['object-name'] = params['object_name'] # noqa: E501
query_params = []
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'length' in params:
query_params.append(('length', params['length'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/octet-stream''text/plain']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/objects/{bucket-name}/{object-name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_properties(self, bucket_name, object_name, **kwargs):  # noqa: E501
    """Query object properties  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.get_properties(bucket_name, object_name, async=True)
    >>> result = thread.get()

    :param async bool
    :param str bucket_name: Bucket name (required)
    :param str object_name: Object name (required)
    :param bool check_cached: Check if the object is cached
    :return: None
    If the method is called asynchronously,
    returns the request thread.
    """
    # Request the payload only; the *_with_http_info variant then yields
    # either the data (sync) or the request thread (async), and both
    # branches of the public API simply forward that value.
    kwargs['_return_http_data_only'] = True
    return self.get_properties_with_http_info(bucket_name, object_name, **kwargs)  # noqa: E501
def get_properties_with_http_info(self, bucket_name, object_name, **kwargs):  # noqa: E501
    """Query object properties  # noqa: E501

    Issues HEAD /objects/{bucket-name}/{object-name}.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.get_properties_with_http_info(bucket_name, object_name, async=True)
    >>> result = thread.get()

    :param async bool
    :param str bucket_name: Bucket name (required)
    :param str object_name: Object name (required)
    :param bool check_cached: Check if the object is cached
    :return: None
    If the method is called asynchronously,
    returns the request thread.
    :raises TypeError: if an unexpected keyword argument is supplied
    :raises ValueError: if a required parameter is missing or None
    """
    all_params = ['bucket_name', 'object_name', 'check_cached']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # locals() must be captured exactly here: `params` is expected to hold
    # only bucket_name, object_name, kwargs and all_params, so no new local
    # may be introduced above this line.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_properties" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'bucket_name' is set
    if ('bucket_name' not in params or
            params['bucket_name'] is None):
        raise ValueError("Missing the required parameter `bucket_name` when calling `get_properties`")  # noqa: E501
    # verify the required parameter 'object_name' is set
    if ('object_name' not in params or
            params['object_name'] is None):
        raise ValueError("Missing the required parameter `object_name` when calling `get_properties`")  # noqa: E501
    collection_formats = {}
    path_params = {}
    # Path placeholders use hyphenated names while Python identifiers use
    # underscores, hence the key rewrite.
    if 'bucket_name' in params:
        path_params['bucket-name'] = params['bucket_name']  # noqa: E501
    if 'object_name' in params:
        path_params['object-name'] = params['object_name']  # noqa: E501
    query_params = []
    if 'check_cached' in params:
        query_params.append(('check_cached', params['check_cached']))  # noqa: E501
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['text/plain'])  # noqa: E501
    # Authentication setting
    auth_settings = []  # noqa: E501
    # NOTE: `async` is passed as a keyword; this generated client targets
    # Python versions where `async` was not yet a reserved word.
    return self.api_client.call_api(
        '/objects/{bucket-name}/{object-name}', 'HEAD',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async=params.get('async'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def perform_operation(self, bucket_name, object_name, input_parameters, **kwargs):  # noqa: E501
    """Perform operations on object such as rename  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.perform_operation(bucket_name, object_name, input_parameters, async=True)
    >>> result = thread.get()

    :param async bool
    :param str bucket_name: Bucket name (required)
    :param str object_name: Object name (required)
    :param InputParameters input_parameters: (required)
    :return: None
    If the method is called asynchronously,
    returns the request thread.
    """
    # Request the payload only; the *_with_http_info variant then yields
    # either the data (sync) or the request thread (async), and both
    # branches of the public API simply forward that value.
    kwargs['_return_http_data_only'] = True
    return self.perform_operation_with_http_info(bucket_name, object_name, input_parameters, **kwargs)  # noqa: E501
def perform_operation_with_http_info(self, bucket_name, object_name, input_parameters, **kwargs):  # noqa: E501
    """Perform operations on object such as rename  # noqa: E501

    Issues POST /objects/{bucket-name}/{object-name} with a JSON body.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.perform_operation_with_http_info(bucket_name, object_name, input_parameters, async=True)
    >>> result = thread.get()

    :param async bool
    :param str bucket_name: Bucket name (required)
    :param str object_name: Object name (required)
    :param InputParameters input_parameters: (required)
    :return: None
    If the method is called asynchronously,
    returns the request thread.
    :raises TypeError: if an unexpected keyword argument is supplied
    :raises ValueError: if a required parameter is missing or None
    """
    all_params = ['bucket_name', 'object_name', 'input_parameters']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # locals() must be captured exactly here: `params` is expected to hold
    # only the declared arguments, kwargs and all_params, so no new local
    # may be introduced above this line.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method perform_operation" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'bucket_name' is set
    if ('bucket_name' not in params or
            params['bucket_name'] is None):
        raise ValueError("Missing the required parameter `bucket_name` when calling `perform_operation`")  # noqa: E501
    # verify the required parameter 'object_name' is set
    if ('object_name' not in params or
            params['object_name'] is None):
        raise ValueError("Missing the required parameter `object_name` when calling `perform_operation`")  # noqa: E501
    # verify the required parameter 'input_parameters' is set
    if ('input_parameters' not in params or
            params['input_parameters'] is None):
        raise ValueError("Missing the required parameter `input_parameters` when calling `perform_operation`")  # noqa: E501
    collection_formats = {}
    path_params = {}
    # Path placeholders use hyphenated names while Python identifiers use
    # underscores, hence the key rewrite.
    if 'bucket_name' in params:
        path_params['bucket-name'] = params['bucket_name']  # noqa: E501
    if 'object_name' in params:
        path_params['object-name'] = params['object_name']  # noqa: E501
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    if 'input_parameters' in params:
        body_params = params['input_parameters']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['text/plain'])  # noqa: E501
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = []  # noqa: E501
    # NOTE: `async` is passed as a keyword; this generated client targets
    # Python versions where `async` was not yet a reserved word.
    return self.api_client.call_api(
        '/objects/{bucket-name}/{object-name}', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async=params.get('async'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def put(self, bucket_name, object_name, **kwargs):  # noqa: E501
    """Put object  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.put(bucket_name, object_name, async=True)
    >>> result = thread.get()

    :param async bool
    :param str bucket_name: Bucket name (required)
    :param str object_name: Object name (required)
    :param str from_id: Source target ID
    :param str to_id: Destination target ID
    :param file body:
    :return: None
    If the method is called asynchronously,
    returns the request thread.
    """
    # Request the payload only; the *_with_http_info variant then yields
    # either the data (sync) or the request thread (async), and both
    # branches of the public API simply forward that value.
    kwargs['_return_http_data_only'] = True
    return self.put_with_http_info(bucket_name, object_name, **kwargs)  # noqa: E501
def put_with_http_info(self, bucket_name, object_name, **kwargs):  # noqa: E501
    """Put object  # noqa: E501

    Issues PUT /objects/{bucket-name}/{object-name} with an
    application/octet-stream body.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.put_with_http_info(bucket_name, object_name, async=True)
    >>> result = thread.get()

    :param async bool
    :param str bucket_name: Bucket name (required)
    :param str object_name: Object name (required)
    :param str from_id: Source target ID
    :param str to_id: Destination target ID
    :param file body:
    :return: None
    If the method is called asynchronously,
    returns the request thread.
    :raises TypeError: if an unexpected keyword argument is supplied
    :raises ValueError: if a required parameter is missing or None
    """
    all_params = ['bucket_name', 'object_name', 'from_id', 'to_id', 'body']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # locals() must be captured exactly here: `params` is expected to hold
    # only bucket_name, object_name, kwargs and all_params, so no new local
    # may be introduced above this line.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method put" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'bucket_name' is set
    if ('bucket_name' not in params or
            params['bucket_name'] is None):
        raise ValueError("Missing the required parameter `bucket_name` when calling `put`")  # noqa: E501
    # verify the required parameter 'object_name' is set
    if ('object_name' not in params or
            params['object_name'] is None):
        raise ValueError("Missing the required parameter `object_name` when calling `put`")  # noqa: E501
    collection_formats = {}
    path_params = {}
    # Path placeholders use hyphenated names while Python identifiers use
    # underscores, hence the key rewrite.
    if 'bucket_name' in params:
        path_params['bucket-name'] = params['bucket_name']  # noqa: E501
    if 'object_name' in params:
        path_params['object-name'] = params['object_name']  # noqa: E501
    query_params = []
    if 'from_id' in params:
        query_params.append(('from_id', params['from_id']))  # noqa: E501
    if 'to_id' in params:
        query_params.append(('to_id', params['to_id']))  # noqa: E501
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['text/plain'])  # noqa: E501
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/octet-stream'])  # noqa: E501
    # Authentication setting
    auth_settings = []  # noqa: E501
    # NOTE: `async` is passed as a keyword; this generated client targets
    # Python versions where `async` was not yet a reserved word.
    return self.api_client.call_api(
        '/objects/{bucket-name}/{object-name}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async=params.get('async'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
| 40.52381
| 128
| 0.610836
| 2,771
| 23,828
| 5.026705
| 0.068206
| 0.068203
| 0.050255
| 0.057434
| 0.936894
| 0.924402
| 0.915787
| 0.914351
| 0.910044
| 0.910044
| 0
| 0.015384
| 0.296164
| 23,828
| 587
| 129
| 40.592845
| 0.815157
| 0.072478
| 0
| 0.76489
| 0
| 0
| 0.211837
| 0.037067
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.012539
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
5dcb8462c8ae228afed2f349582c6f98591aaff2
| 21,866
|
py
|
Python
|
scripts/type_extractor/tests/remove_json_types_tests.py
|
mehrdad-shokri/retdec
|
a82f16e97b163afe789876e0a819489c5b9b358e
|
[
"MIT",
"Zlib",
"BSD-3-Clause"
] | 4,816
|
2017-12-12T18:07:09.000Z
|
2019-04-17T02:01:04.000Z
|
scripts/type_extractor/tests/remove_json_types_tests.py
|
mehrdad-shokri/retdec
|
a82f16e97b163afe789876e0a819489c5b9b358e
|
[
"MIT",
"Zlib",
"BSD-3-Clause"
] | 514
|
2017-12-12T18:22:52.000Z
|
2019-04-16T16:07:11.000Z
|
scripts/type_extractor/tests/remove_json_types_tests.py
|
mehrdad-shokri/retdec
|
a82f16e97b163afe789876e0a819489c5b9b358e
|
[
"MIT",
"Zlib",
"BSD-3-Clause"
] | 579
|
2017-12-12T18:38:02.000Z
|
2019-04-11T13:32:53.000Z
|
"""Units tests for the type_extractor.remove_json_types module."""
import unittest
from type_extractor.remove_json_types import remove_qualifier_json_types
from type_extractor.remove_json_types import remove_unused_json_types
class RemoveUnusedJsonTypesTests(unittest.TestCase):
    """Tests for remove_unused_json_types().

    Each test builds a `functions` dict and a `types` dict (keyed by type
    hash) and checks which type entries survive the removal of types not
    reachable from any function signature.
    """

    def test_all_unused_type_are_removed(self):
        """Types not referenced from any function are dropped entirely."""
        functions = {
            "f": {
                "decl": "int f();",
                "header": "tx.h",
                "name": "f",
                "params": [],
                "ret_type": "46f8ab7c0cff9df7cd124852e26022a6bf89e315"
            }
        }
        # BUG FIX: the struct entry previously reused the qualifier's hash
        # ("71fafc4e...5534c2"), so the duplicate dict key silently
        # overwrote the qualifier and only four types were actually tested.
        # The struct now has its own hash, and its tag is "structure" for
        # consistency with the other tests in this file.
        types = {
            "46f8ab7c0cff9df7cd124852e26022a6bf89e315": {
                "name": "int",
                "type": "integral_type"
            },
            "71fafc4e2fc1e47e234762a96b80512b6b5534c2": {
                "name": "const",
                "type": "qualifier",
                "modified_type": "46f8ab7c0cff9df7cd124852e26022a6bf89e315"
            },
            "685e80366130387cb75c055248326976d16fdf8d": {
                "name": "float",
                "type": "floating_point_type"
            },
            "d8c550a1f49f312b1bf5709f7f7c7e25e1dfe210": {
                "dimensions": [
                    10
                ],
                "element_type": "71fafc4e2fc1e47e234762a96b80512b6b5534c2",
                "type": "array"
            },
            "3d6186cdc4278ee703981d00c71201ca3344c592": {
                "members": [
                    {
                        "name": "a",
                        "type": "46f8ab7c0cff9df7cd124852e26022a6bf89e315"
                    }
                ],
                "name": "struct s",
                "type": "structure",
            }
        }
        expected_types = {
            "46f8ab7c0cff9df7cd124852e26022a6bf89e315": {
                "name": "int",
                "type": "integral_type"
            }
        }
        self.assertEqual(remove_unused_json_types(functions, types), expected_types)

    def test_all_func_parameter_types_are_kept(self):
        """Types referenced by function parameters survive."""
        functions = {
            "f": {
                "decl": "int f(char a, float f);",
                "header": "tx.h",
                "name": "f",
                "params": [
                    {
                        "name": "a",
                        "type": "71fafc4e2fc1e47e234762a96b80512b6b5534c2"
                    },
                    {
                        "name": "f",
                        "type": "685e80366130387cb75c055248326976d16fdf8d"
                    }
                ],
                "ret_type": "46f8ab7c0cff9df7cd124852e26022a6bf89e315"
            }
        }
        types = {
            "46f8ab7c0cff9df7cd124852e26022a6bf89e315": {
                "name": "int",
                "type": "integral_type"
            },
            "685e80366130387cb75c055248326976d16fdf8d": {
                "name": "float",
                "type": "floating_point_type"
            },
            "71fafc4e2fc1e47e234762a96b80512b6b5534c2": {
                "name": "char",
                "type": "integral_type"
            }
        }
        self.assertEqual(remove_unused_json_types(functions, types), types)

    def test_element_type_of_array_is_kept(self):
        """An array parameter keeps its element type alive."""
        functions = {
            "f": {
                "decl": "int f(char a[10]);",
                "header": "tx.h",
                "name": "f",
                "params": [
                    {
                        "name": "a",
                        "type": "d8c550a1f49f312b1bf5709f7f7c7e25e1dfe210"
                    },
                ],
                "ret_type": "46f8ab7c0cff9df7cd124852e26022a6bf89e315"
            }
        }
        types = {
            "46f8ab7c0cff9df7cd124852e26022a6bf89e315": {
                "name": "int",
                "type": "integral_type"
            },
            "71fafc4e2fc1e47e234762a96b80512b6b5534c2": {
                "name": "char",
                "type": "integral_type"
            },
            "d8c550a1f49f312b1bf5709f7f7c7e25e1dfe210": {
                "dimensions": [
                    10
                ],
                "element_type": "71fafc4e2fc1e47e234762a96b80512b6b5534c2",
                "type": "array"
            }
        }
        self.assertEqual(remove_unused_json_types(functions, types), types)

    def test_typedefed_type_is_kept(self):
        """A typedef keeps the type it aliases alive."""
        functions = {
            "f": {
                "decl": "INT f();",
                "header": "tx.h",
                "name": "f",
                "params": [],
                "ret_type": "71fafc4e2fc1e47e234762a96b80512b6b5534c2"
            }
        }
        types = {
            "46f8ab7c0cff9df7cd124852e26022a6bf89e315": {
                "name": "int",
                "type": "integral_type"
            },
            "71fafc4e2fc1e47e234762a96b80512b6b5534c2": {
                "name": "INT",
                "type": "typedef",
                "typedefed_type": "46f8ab7c0cff9df7cd124852e26022a6bf89e315"
            }
        }
        self.assertEqual(remove_unused_json_types(functions, types), types)

    def test_function_type_params_are_kept(self):
        """A function-typed parameter keeps its own param/return types."""
        functions = {
            "f": {
                "decl": "int f(int f(char));",
                "header": "tx.h",
                "name": "f",
                "params": [
                    {
                        "name": "f",
                        "type": "361d6282a400aca2fb0ce4b769c85ee086a9ee4c"
                    }
                ],
                "ret_type": "46f8ab7c0cff9df7cd124852e26022a6bf89e315"
            }
        }
        types = {
            "361d6282a400aca2fb0ce4b769c85ee086a9ee4c": {
                "params": [
                    {
                        "name": "",
                        "type": "71fafc4e2fc1e47e234762a96b80512b6b5534c2"
                    }
                ],
                "ret_type": "46f8ab7c0cff9df7cd124852e26022a6bf89e315",
                "type": "function"
            },
            "46f8ab7c0cff9df7cd124852e26022a6bf89e315": {
                "name": "int",
                "type": "integral_type"
            },
            "71fafc4e2fc1e47e234762a96b80512b6b5534c2": {
                "name": "char",
                "type": "integral_type"
            }
        }
        self.assertEqual(remove_unused_json_types(functions, types), types)

    def test_pointed_type_is_kept(self):
        """A pointer keeps its pointed-to type alive."""
        functions = {
            "f": {
                "decl": "int * f();",
                "header": "tx.h",
                "name": "f",
                "params": [],
                "ret_type": "71fafc4e2fc1e47e234762a96b80512b6b5534c2"
            }
        }
        types = {
            "46f8ab7c0cff9df7cd124852e26022a6bf89e315": {
                "name": "int",
                "type": "integral_type"
            },
            "71fafc4e2fc1e47e234762a96b80512b6b5534c2": {
                "type": "pointer",
                "pointed_type": "46f8ab7c0cff9df7cd124852e26022a6bf89e315"
            }
        }
        self.assertEqual(remove_unused_json_types(functions, types), types)

    def test_const_modified_type_is_kept(self):
        """A qualifier keeps the type it modifies alive."""
        functions = {
            "f": {
                "decl": "const int f();",
                "header": "tx.h",
                "name": "f",
                "params": [],
                "ret_type": "71fafc4e2fc1e47e234762a96b80512b6b5534c2"
            }
        }
        types = {
            "46f8ab7c0cff9df7cd124852e26022a6bf89e315": {
                "name": "int",
                "type": "integral_type"
            },
            "71fafc4e2fc1e47e234762a96b80512b6b5534c2": {
                "name": "const",
                "type": "qualifier",
                "modified_type": "46f8ab7c0cff9df7cd124852e26022a6bf89e315"
            }
        }
        self.assertEqual(remove_unused_json_types(functions, types), types)

    def test_struct_members_are_kept(self):
        """A structure keeps its member types alive."""
        functions = {
            "f": {
                "decl": "struct s f();",
                "header": "tx.h",
                "name": "f",
                "params": [],
                "ret_type": "71fafc4e2fc1e47e234762a96b80512b6b5534c2"
            }
        }
        types = {
            "46f8ab7c0cff9df7cd124852e26022a6bf89e315": {
                "name": "int",
                "type": "integral_type"
            },
            "71fafc4e2fc1e47e234762a96b80512b6b5534c2": {
                "members": [
                    {
                        "name": "a",
                        "type": "46f8ab7c0cff9df7cd124852e26022a6bf89e315"
                    }
                ],
                "name": "struct s",
                "type": "structure",
            }
        }
        self.assertEqual(remove_unused_json_types(functions, types), types)
class RemoveQualifierTypesTests(unittest.TestCase):
    """Tests for remove_qualifier_json_types().

    The function is expected to modify the given JSON in place: every
    qualifier type (const/restrict/...) is removed from `types` and all
    references to it are rewritten to point to the modified type.
    """

    def test_remove_qualifier_types_from_function(self):
        """Qualifier refs in params and ret_type are rewritten; the
        qualifier entry itself is removed."""
        json = {
            "functions": {
                "f": {
                    "decl": "const int f(const int i);",
                    "header": "tx.h",
                    "name": "f",
                    "params": [
                        {
                            "name": "i",
                            "type": "0ff04d04cf6c73308eda9ef3c2a850b0b80e5666"
                        }
                    ],
                    "ret_type": "0ff04d04cf6c73308eda9ef3c2a850b0b80e5666"
                }
            },
            "types": {
                "0ff04d04cf6c73308eda9ef3c2a850b0b80e5666": {
                    "modified_type": "46f8ab7c0cff9df7cd124852e26022a6bf89e315",
                    "name": "const",
                    "type": "qualifier"
                },
                "46f8ab7c0cff9df7cd124852e26022a6bf89e315": {
                    "name": "int",
                    "type": "integral_type"
                }
            }
        }
        remove_qualifier_json_types(json)
        self.assertEqual(
            json,
            {
                "functions": {
                    "f": {
                        "decl": "const int f(const int i);",
                        "header": "tx.h",
                        "name": "f",
                        "params": [
                            {
                                "name": "i",
                                "type": "46f8ab7c0cff9df7cd124852e26022a6bf89e315"
                            }
                        ],
                        "ret_type": "46f8ab7c0cff9df7cd124852e26022a6bf89e315"
                    }
                },
                "types": {
                    "46f8ab7c0cff9df7cd124852e26022a6bf89e315": {
                        "name": "int",
                        "type": "integral_type"
                    }
                }
            }
        )

    def test_remove_qualifier_types_from_array(self):
        """An array's element_type is de-qualified in place; the array's
        own hash is kept unchanged."""
        json = {
            "functions": {
                "f": {
                    "decl": "int f(const int i[]);",
                    "header": "tx.h",
                    "name": "f",
                    "params": [
                        {
                            "name": "i",
                            "type": "8a5702ae4925ef124198af3352b8673ae1b5c623"
                        }
                    ],
                    "ret_type": "46f8ab7c0cff9df7cd124852e26022a6bf89e315"
                }
            },
            "types": {
                "0ff04d04cf6c73308eda9ef3c2a850b0b80e5666": {
                    "modified_type": "46f8ab7c0cff9df7cd124852e26022a6bf89e315",
                    "name": "const",
                    "type": "qualifier"
                },
                "46f8ab7c0cff9df7cd124852e26022a6bf89e315": {
                    "name": "int",
                    "type": "integral_type"
                },
                "8a5702ae4925ef124198af3352b8673ae1b5c623": {
                    "dimensions": [
                        ""
                    ],
                    "element_type": "0ff04d04cf6c73308eda9ef3c2a850b0b80e5666",
                    "type": "array"
                }
            }
        }
        remove_qualifier_json_types(json)
        self.assertEqual(
            json,
            {
                "functions": {
                    "f": {
                        "decl": "int f(const int i[]);",
                        "header": "tx.h",
                        "name": "f",
                        "params": [
                            {
                                "name": "i",
                                "type": "8a5702ae4925ef124198af3352b8673ae1b5c623"
                            }
                        ],
                        "ret_type": "46f8ab7c0cff9df7cd124852e26022a6bf89e315"
                    }
                },
                "types": {
                    "46f8ab7c0cff9df7cd124852e26022a6bf89e315": {
                        "name": "int",
                        "type": "integral_type"
                    },
                    "8a5702ae4925ef124198af3352b8673ae1b5c623": {
                        "dimensions": [
                            ""
                        ],
                        "element_type": "46f8ab7c0cff9df7cd124852e26022a6bf89e315",
                        "type": "array"
                    }
                }
            }
        )

    def test_remove_qualifier_types_from_function_type(self):
        """A function type's ret_type is de-qualified; unrelated typedef
        and pointer entries are untouched."""
        json = {
            "functions": {},
            "types": {
                "0ff04d04cf6c73308eda9ef3c2a850b0b80e5666": {
                    "modified_type": "46f8ab7c0cff9df7cd124852e26022a6bf89e315",
                    "name": "const",
                    "type": "qualifier"
                },
                "3962e448d156cb85e3bf7e1216efa8139119f4b4": {
                    "params": [],
                    "ret_type": "0ff04d04cf6c73308eda9ef3c2a850b0b80e5666",
                    "type": "function"
                },
                "46f8ab7c0cff9df7cd124852e26022a6bf89e315": {
                    "name": "int",
                    "type": "integral_type"
                },
                "5e04ab331d26527cb0f2c9d998df6250844e5616": {
                    "name": "f",
                    "type": "typedef",
                    "typedefed_type": "d67c28e05e53598da559c8acdf8e577fc3c70726"
                },
                "d67c28e05e53598da559c8acdf8e577fc3c70726": {
                    "pointed_type": "3962e448d156cb85e3bf7e1216efa8139119f4b4",
                    "type": "pointer"
                }
            }
        }
        remove_qualifier_json_types(json)
        self.assertEqual(
            json,
            {
                "functions": {},
                "types": {
                    "3962e448d156cb85e3bf7e1216efa8139119f4b4": {
                        "params": [],
                        "ret_type": "46f8ab7c0cff9df7cd124852e26022a6bf89e315",
                        "type": "function"
                    },
                    "46f8ab7c0cff9df7cd124852e26022a6bf89e315": {
                        "name": "int",
                        "type": "integral_type"
                    },
                    "5e04ab331d26527cb0f2c9d998df6250844e5616": {
                        "name": "f",
                        "type": "typedef",
                        "typedefed_type": "d67c28e05e53598da559c8acdf8e577fc3c70726"
                    },
                    "d67c28e05e53598da559c8acdf8e577fc3c70726": {
                        "pointed_type": "3962e448d156cb85e3bf7e1216efa8139119f4b4",
                        "type": "pointer"
                    },
                }
            }
        )

    def test_remove_qualifier_types_struct_members(self):
        """Structure member types are de-qualified, both direct refs and
        refs reached through a typedef."""
        json = {
            "functions": {},
            "types": {
                "0ff04d04cf6c73308eda9ef3c2a850b0b80e5666": {
                    "modified_type": "46f8ab7c0cff9df7cd124852e26022a6bf89e315",
                    "name": "const",
                    "type": "qualifier"
                },
                "3d6186cdc4278ee703981d00c71201ca3344c592": {
                    "members": [
                        {
                            "name": "i",
                            "type": "8219838a8cbd6b107cf558a616256e894f773b5a"
                        },
                        {
                            "name": "j",
                            "type": "0ff04d04cf6c73308eda9ef3c2a850b0b80e5666"
                        }
                    ],
                    "name": "struct s",
                    "type": "structure"
                },
                "46f8ab7c0cff9df7cd124852e26022a6bf89e315": {
                    "name": "int",
                    "type": "integral_type"
                },
                "8219838a8cbd6b107cf558a616256e894f773b5a": {
                    "name": "CINT",
                    "type": "typedef",
                    "typedefed_type": "0ff04d04cf6c73308eda9ef3c2a850b0b80e5666"
                }
            }
        }
        remove_qualifier_json_types(json)
        self.assertEqual(
            json,
            {
                "functions": {},
                "types": {
                    "3d6186cdc4278ee703981d00c71201ca3344c592": {
                        "members": [
                            {
                                "name": "i",
                                "type": "8219838a8cbd6b107cf558a616256e894f773b5a"
                            },
                            {
                                "name": "j",
                                "type": "46f8ab7c0cff9df7cd124852e26022a6bf89e315"
                            }
                        ],
                        "name": "struct s",
                        "type": "structure"
                    },
                    "46f8ab7c0cff9df7cd124852e26022a6bf89e315": {
                        "name": "int",
                        "type": "integral_type"
                    },
                    "8219838a8cbd6b107cf558a616256e894f773b5a": {
                        "name": "CINT",
                        "type": "typedef",
                        "typedefed_type": "46f8ab7c0cff9df7cd124852e26022a6bf89e315"
                    }
                }
            }
        )

    def test_remove_qualifier_types_from_pointer(self):
        """A pointer's pointed_type is de-qualified in place."""
        json = {
            "functions": {},
            "types": {
                "0ff04d04cf6c73308eda9ef3c2a850b0b80e5666": {
                    "modified_type": "46f8ab7c0cff9df7cd124852e26022a6bf89e315",
                    "name": "const",
                    "type": "qualifier"
                },
                "46f8ab7c0cff9df7cd124852e26022a6bf89e315": {
                    "name": "int",
                    "type": "integral_type"
                },
                "8c6e2fa96c2304299bc328f9652d0233776b100a": {
                    "name": "PCINT",
                    "type": "typedef",
                    "typedefed_type": "f5e774f604bf7ffbdaf2745cb9e37208465050e2"
                },
                "f5e774f604bf7ffbdaf2745cb9e37208465050e2": {
                    "pointed_type": "0ff04d04cf6c73308eda9ef3c2a850b0b80e5666",
                    "type": "pointer"
                }
            }
        }
        remove_qualifier_json_types(json)
        self.assertEqual(
            json,
            {
                "functions": {},
                "types": {
                    "46f8ab7c0cff9df7cd124852e26022a6bf89e315": {
                        "name": "int",
                        "type": "integral_type"
                    },
                    "8c6e2fa96c2304299bc328f9652d0233776b100a": {
                        "name": "PCINT",
                        "type": "typedef",
                        "typedefed_type": "f5e774f604bf7ffbdaf2745cb9e37208465050e2"
                    },
                    "f5e774f604bf7ffbdaf2745cb9e37208465050e2": {
                        "pointed_type": "46f8ab7c0cff9df7cd124852e26022a6bf89e315",
                        "type": "pointer"
                    }
                }
            }
        )

    def test_remove_two_level_qualifier_types(self):
        """'const restrict int' should be substituted to int"""
        json = {
            "functions": {},
            "types": {
                "0ff04d04cf6c73308eda9ef3c2a850b0b80e5666": {
                    "modified_type": "46f8ab7c0cff9df7cd124852e26022a6bf89e315",
                    "name": "const",
                    "type": "qualifier"
                },
                "2eb539806be1e0d28a149acdbc952890d5b52320": {
                    "modified_type": "0ff04d04cf6c73308eda9ef3c2a850b0b80e5666",
                    "name": "restrict",
                    "type": "qualifier"
                },
                "46f8ab7c0cff9df7cd124852e26022a6bf89e315": {
                    "name": "int",
                    "type": "integral_type"
                },
                "a8bff8de7eac4c7fa1f24f64d1b73b8aae268620": {
                    "name": "CRINT",
                    "type": "typedef",
                    "typedefed_type": "2eb539806be1e0d28a149acdbc952890d5b52320"
                }
            }
        }
        remove_qualifier_json_types(json)
        self.assertEqual(
            json,
            {
                "functions": {},
                "types": {
                    "46f8ab7c0cff9df7cd124852e26022a6bf89e315": {
                        "name": "int",
                        "type": "integral_type"
                    },
                    "a8bff8de7eac4c7fa1f24f64d1b73b8aae268620": {
                        "name": "CRINT",
                        "type": "typedef",
                        "typedefed_type": "46f8ab7c0cff9df7cd124852e26022a6bf89e315"
                    }
                }
            }
        )
| 35.097913
| 84
| 0.413976
| 983
| 21,866
| 9.006104
| 0.092574
| 0.149102
| 0.043375
| 0.120976
| 0.836101
| 0.814978
| 0.804586
| 0.76347
| 0.748221
| 0.678979
| 0
| 0.261136
| 0.484588
| 21,866
| 622
| 85
| 35.154341
| 0.524401
| 0.005031
| 0
| 0.640893
| 0
| 0
| 0.335341
| 0.215222
| 0
| 0
| 0
| 0
| 0.024055
| 1
| 0.024055
| false
| 0
| 0.005155
| 0
| 0.032646
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5def047c5d4fe8082da23df479ba8bf64d50140e
| 4,824
|
py
|
Python
|
skimage/feature/tests/test_blob.py
|
RobbiNespu/scikit-image
|
a65b8af4bf0f5f71624a91ea5ce7812e80cae4cb
|
[
"BSD-3-Clause"
] | 8
|
2016-03-11T13:23:51.000Z
|
2021-12-19T10:43:26.000Z
|
skimage/feature/tests/test_blob.py
|
RobbiNespu/scikit-image
|
a65b8af4bf0f5f71624a91ea5ce7812e80cae4cb
|
[
"BSD-3-Clause"
] | null | null | null |
skimage/feature/tests/test_blob.py
|
RobbiNespu/scikit-image
|
a65b8af4bf0f5f71624a91ea5ce7812e80cae4cb
|
[
"BSD-3-Clause"
] | 4
|
2020-06-19T00:04:34.000Z
|
2021-02-23T07:24:00.000Z
|
import numpy as np
from skimage.draw import circle
from skimage.feature import blob_dog, blob_log, blob_doh
import math
from numpy.testing import assert_raises
def test_blob_dog():
    """blob_dog should locate three bright discs of assorted radii and
    reject non-2D input with a ValueError."""
    root2 = math.sqrt(2)
    img = np.ones((512, 512))
    img3 = np.ones((5, 5, 5))

    # Paint three bright discs: (row, col, radius).
    discs = [(400, 130, 5), (100, 300, 25), (200, 350, 45)]
    for row, col, rad in discs:
        rr, cc = circle(row, col, rad)
        img[rr, cc] = 255

    blobs = blob_dog(img, min_sigma=5, max_sigma=50)
    radius = lambda b: root2 * b[2]
    found = sorted(blobs, key=radius)
    thresh = 5

    # Smallest-to-largest detections must match the painted discs.
    for idx, (row, col, rad) in enumerate(discs):
        b = found[idx]
        assert abs(b[0] - row) <= thresh
        assert abs(b[1] - col) <= thresh
        assert abs(radius(b) - rad) <= thresh

    # 3D input is not supported.
    assert_raises(ValueError, blob_dog, img3)
def test_blob_log():
    """blob_log should locate four bright discs with both linear and
    log-spaced sigma scales, and reject non-2D input."""
    r2 = math.sqrt(2)
    img = np.ones((512, 512))
    img3 = np.ones((5, 5, 5))
    xs, ys = circle(400, 130, 5)
    img[xs, ys] = 255
    xs, ys = circle(160, 50, 15)
    img[xs, ys] = 255
    xs, ys = circle(100, 300, 25)
    img[xs, ys] = 255
    xs, ys = circle(200, 350, 30)
    img[xs, ys] = 255
    blobs = blob_log(img, min_sigma=5, max_sigma=20, threshold=1)
    radius = lambda x: r2 * x[2]
    s = sorted(blobs, key=radius)
    thresh = 3
    b = s[0]
    assert abs(b[0] - 400) <= thresh
    assert abs(b[1] - 130) <= thresh
    assert abs(radius(b) - 5) <= thresh
    b = s[1]
    assert abs(b[0] - 160) <= thresh
    assert abs(b[1] - 50) <= thresh
    assert abs(radius(b) - 15) <= thresh
    b = s[2]
    assert abs(b[0] - 100) <= thresh
    assert abs(b[1] - 300) <= thresh
    assert abs(radius(b) - 25) <= thresh
    b = s[3]
    assert abs(b[0] - 200) <= thresh
    assert abs(b[1] - 350) <= thresh
    assert abs(radius(b) - 30) <= thresh
    # Testing log scale
    blobs = blob_log(
        img,
        min_sigma=5,
        max_sigma=20,
        threshold=1,
        log_scale=True)
    # BUG FIX: re-sort the *new* detections; previously the assertions
    # below re-checked the stale results of the linear-scale run, so the
    # log_scale=True path was never actually verified.
    s = sorted(blobs, key=radius)
    b = s[0]
    assert abs(b[0] - 400) <= thresh
    assert abs(b[1] - 130) <= thresh
    assert abs(radius(b) - 5) <= thresh
    b = s[1]
    assert abs(b[0] - 160) <= thresh
    assert abs(b[1] - 50) <= thresh
    assert abs(radius(b) - 15) <= thresh
    b = s[2]
    assert abs(b[0] - 100) <= thresh
    assert abs(b[1] - 300) <= thresh
    assert abs(radius(b) - 25) <= thresh
    b = s[3]
    assert abs(b[0] - 200) <= thresh
    assert abs(b[1] - 350) <= thresh
    assert abs(radius(b) - 30) <= thresh
    # 3D input is not supported.
    assert_raises(ValueError, blob_log, img3)
def test_blob_doh():
    """blob_doh should locate four bright discs with both linear and
    log-spaced sigma scales, and reject non-2D input."""
    img = np.ones((512, 512), dtype=np.uint8)
    img3 = np.ones((5, 5, 5))
    xs, ys = circle(400, 130, 20)
    img[xs, ys] = 255
    xs, ys = circle(460, 50, 30)
    img[xs, ys] = 255
    xs, ys = circle(100, 300, 40)
    img[xs, ys] = 255
    xs, ys = circle(200, 350, 50)
    img[xs, ys] = 255
    blobs = blob_doh(
        img,
        min_sigma=1,
        max_sigma=60,
        num_sigma=10,
        threshold=.05)
    # For blob_doh the third column already is the radius estimate.
    radius = lambda x: x[2]
    s = sorted(blobs, key=radius)
    thresh = 4
    b = s[0]
    assert abs(b[0] - 400) <= thresh
    assert abs(b[1] - 130) <= thresh
    assert abs(radius(b) - 20) <= thresh
    b = s[1]
    assert abs(b[0] - 460) <= thresh
    assert abs(b[1] - 50) <= thresh
    assert abs(radius(b) - 30) <= thresh
    b = s[2]
    assert abs(b[0] - 100) <= thresh
    assert abs(b[1] - 300) <= thresh
    assert abs(radius(b) - 40) <= thresh
    b = s[3]
    assert abs(b[0] - 200) <= thresh
    assert abs(b[1] - 350) <= thresh
    assert abs(radius(b) - 50) <= thresh
    # Testing log scale
    blobs = blob_doh(
        img,
        min_sigma=1,
        max_sigma=60,
        num_sigma=10,
        log_scale=True,
        threshold=.05)
    # BUG FIX: re-sort the *new* detections; previously the assertions
    # below re-checked the stale results of the linear-scale run, so the
    # log_scale=True path was never actually verified.
    s = sorted(blobs, key=radius)
    b = s[0]
    assert abs(b[0] - 400) <= thresh
    assert abs(b[1] - 130) <= thresh
    assert abs(radius(b) - 20) <= thresh
    b = s[1]
    assert abs(b[0] - 460) <= thresh
    assert abs(b[1] - 50) <= thresh
    assert abs(radius(b) - 30) <= thresh
    b = s[2]
    assert abs(b[0] - 100) <= thresh
    assert abs(b[1] - 300) <= thresh
    assert abs(radius(b) - 40) <= thresh
    b = s[3]
    assert abs(b[0] - 200) <= thresh
    assert abs(b[1] - 350) <= thresh
    assert abs(radius(b) - 50) <= thresh
    # 3D input is not supported.
    assert_raises(ValueError, blob_doh, img3)
def test_blob_overlap():
    """Two heavily overlapping discs must merge into a single DoH blob."""
    img = np.ones((512, 512), dtype=np.uint8)
    # Draw two circles whose areas overlap substantially.
    for row, col, rad in ((100, 100, 20), (120, 100, 30)):
        rr, cc = circle(row, col, rad)
        img[rr, cc] = 255
    detected = blob_doh(
        img,
        min_sigma=1,
        max_sigma=60,
        num_sigma=10,
        threshold=.05)
    assert len(detected) == 1
| 22.437209
| 65
| 0.537106
| 775
| 4,824
| 3.290323
| 0.096774
| 0.201176
| 0.14902
| 0.081961
| 0.872549
| 0.838824
| 0.811765
| 0.806275
| 0.756078
| 0.735686
| 0
| 0.126519
| 0.300373
| 4,824
| 214
| 66
| 22.542056
| 0.629037
| 0.007255
| 0
| 0.78125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.3875
| 1
| 0.025
| false
| 0
| 0.03125
| 0
| 0.05625
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b9056c4fc47f758b80cd9693466575594a49047d
| 135
|
py
|
Python
|
docs/build/lib/vicedtools/compass/__init__.py
|
gregbreese/vicedtools
|
2d32c8ea2437b67b72495e254b71e7f048bd8f9f
|
[
"Apache-2.0"
] | 2
|
2021-09-23T06:16:49.000Z
|
2021-11-02T00:53:22.000Z
|
docs/build/lib/vicedtools/compass/__init__.py
|
gregbreese/vicedtools
|
2d32c8ea2437b67b72495e254b71e7f048bd8f9f
|
[
"Apache-2.0"
] | null | null | null |
docs/build/lib/vicedtools/compass/__init__.py
|
gregbreese/vicedtools
|
2d32c8ea2437b67b72495e254b71e7f048bd8f9f
|
[
"Apache-2.0"
] | null | null | null |
from vicedtools.compass.exports import export_student_enrolments, export_student_details
from vicedtools.compass.reports import Reports
| 67.5
| 88
| 0.903704
| 17
| 135
| 6.941176
| 0.588235
| 0.237288
| 0.355932
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.059259
| 135
| 2
| 89
| 67.5
| 0.929134
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 8
|
f8dd1176648ac9ed3fd9629cb819641779564f00
| 123
|
py
|
Python
|
base/handler/helpers/__init__.py
|
vralex/RumbleRunner
|
eb9889daf90846176af292d4e7411c41dac885c8
|
[
"MIT"
] | 2
|
2022-01-26T15:06:02.000Z
|
2022-02-03T05:14:52.000Z
|
base/handler/helpers/__init__.py
|
vralex/RumbleRunner
|
eb9889daf90846176af292d4e7411c41dac885c8
|
[
"MIT"
] | 1
|
2022-02-07T23:50:26.000Z
|
2022-02-07T23:50:26.000Z
|
base/handler/helpers/__init__.py
|
vralex/RumbleRunner
|
eb9889daf90846176af292d4e7411c41dac885c8
|
[
"MIT"
] | 1
|
2022-02-07T23:19:16.000Z
|
2022-02-07T23:19:16.000Z
|
from base.handler.helpers.actions import Actions
from base.handler.helpers.inline_menu import InlineMenuButton, InlineMenu
| 41
| 73
| 0.869919
| 16
| 123
| 6.625
| 0.625
| 0.150943
| 0.283019
| 0.415094
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073171
| 123
| 2
| 74
| 61.5
| 0.929825
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
f8fdfddd9ef2af7dddad05952e451b4627f6fae1
| 14,102
|
py
|
Python
|
zaid/nasa_asteroids/archive/asteroids.py
|
peter-kinzelman/bdd_behave_demo
|
d703fff6948033c2b459dab133df11690d164664
|
[
"MIT"
] | null | null | null |
zaid/nasa_asteroids/archive/asteroids.py
|
peter-kinzelman/bdd_behave_demo
|
d703fff6948033c2b459dab133df11690d164664
|
[
"MIT"
] | 4
|
2020-02-12T02:51:50.000Z
|
2021-06-10T21:35:07.000Z
|
zaid/nasa_asteroids/archive/asteroids.py
|
peter-kinzelman/bdd_behave_demo
|
d703fff6948033c2b459dab133df11690d164664
|
[
"MIT"
] | 3
|
2019-06-20T15:00:29.000Z
|
2019-06-25T14:03:43.000Z
|
{u'near_earth_objects': {u'2015-09-09': [{u'is_sentry_object': False, u'links': {u'self': u'http://www.neowsapp.com/rest/v1/neo/3727636?api_key=DEMO_KEY'}, u'nasa_jpl_url': u'http://ssd.jpl.nasa.gov/sbdb.cgi?sstr=3727636', u'absolute_magnitude_h': 25.0, u'estimated_diameter': {u'feet': {u'estimated_diameter_max': 194.9956979785, u'estimated_diameter_min': 87.2047272}, u'miles': {u'estimated_diameter_max': 0.0369309908, u'estimated_diameter_min': 0.0165160412}, u'meters': {u'estimated_diameter_max': 59.4346868419, u'estimated_diameter_min': 26.58}, u'kilometers': {u'estimated_diameter_max': 0.0594346868, u'estimated_diameter_min': 0.02658}}, u'close_approach_data': [{u'epoch_date_close_approach': 1441759620000, u'orbiting_body': u'Earth', u'close_approach_date': u'2015-09-09', u'relative_velocity': {u'kilometers_per_second': u'6.3305268182', u'miles_per_hour': u'14160.7554396849', u'kilometers_per_hour': u'22789.8965456058'}, u'miss_distance': {u'astronomical': u'0.0137572541', u'miles': u'1278816.6432123046', u'lunar': u'5.3515718449', u'kilometers': u'2058055.910408767'}, u'close_approach_date_full': u'2015-Sep-09 00:47'}], u'neo_reference_id': u'3727636', u'is_potentially_hazardous_asteroid': False, u'id': u'3727636', u'name': u'(2015 RO83)'}, {u'is_sentry_object': False, u'links': {u'self': u'http://www.neowsapp.com/rest/v1/neo/3728370?api_key=DEMO_KEY'}, u'nasa_jpl_url': u'http://ssd.jpl.nasa.gov/sbdb.cgi?sstr=3728370', u'absolute_magnitude_h': 26.2, u'estimated_diameter': {u'feet': {u'estimated_diameter_max': 112.2083122258, u'estimated_diameter_min': 50.1810827555}, u'miles': {u'estimated_diameter_max': 0.021251567, u'estimated_diameter_min': 0.0095039897}, u'meters': {u'estimated_diameter_max': 34.201092472, u'estimated_diameter_min': 15.2951935344}, u'kilometers': {u'estimated_diameter_max': 0.0342010925, u'estimated_diameter_min': 0.0152951935}}, u'close_approach_data': [{u'epoch_date_close_approach': 1441770300000, u'orbiting_body': u'Earth', 
u'close_approach_date': u'2015-09-09', u'relative_velocity': {u'kilometers_per_second': u'10.8708086347', u'miles_per_hour': u'24316.9118349099', u'kilometers_per_hour': u'39134.9110848386'}, u'miss_distance': {u'astronomical': u'0.0253962519', u'miles': u'2360729.0647435314', u'lunar': u'9.8791419891', u'kilometers': u'3799225.190223453'}, u'close_approach_date_full': u'2015-Sep-09 03:45'}], u'neo_reference_id': u'3728370', u'is_potentially_hazardous_asteroid': False, u'id': u'3728370', u'name': u'(2015 ST)'}, {u'is_sentry_object': False, u'links': {u'self': u'http://www.neowsapp.com/rest/v1/neo/3117424?api_key=DEMO_KEY'}, u'nasa_jpl_url': u'http://ssd.jpl.nasa.gov/sbdb.cgi?sstr=3117424', u'absolute_magnitude_h': 21.3, u'estimated_diameter': {u'feet': {u'estimated_diameter_max': 1071.581062656, u'estimated_diameter_min': 479.2256199}, u'miles': {u'estimated_diameter_max': 0.2029508896, u'estimated_diameter_min': 0.090762397}, u'meters': {u'estimated_diameter_max': 326.6178974458, u'estimated_diameter_min': 146.0679642714}, u'kilometers': {u'estimated_diameter_max': 0.3266178974, u'estimated_diameter_min': 0.1460679643}}, u'close_approach_data': [{u'epoch_date_close_approach': 1441838820000, u'orbiting_body': u'Earth', u'close_approach_date': u'2015-09-09', u'relative_velocity': {u'kilometers_per_second': u'17.8745327473', u'miles_per_hour': u'39983.542302381', u'kilometers_per_hour': u'64348.3178901925'}, u'miss_distance': {u'astronomical': u'0.244177382', u'miles': u'22697705.350779892', u'lunar': u'94.985001598', u'kilometers': u'36528416.24937634'}, u'close_approach_date_full': u'2015-Sep-09 22:47'}], u'neo_reference_id': u'3117424', u'is_potentially_hazardous_asteroid': False, u'id': u'3117424', u'name': u'(2002 EC3)'}, {u'is_sentry_object': False, u'links': {u'self': u'http://www.neowsapp.com/rest/v1/neo/3719998?api_key=DEMO_KEY'}, u'nasa_jpl_url': u'http://ssd.jpl.nasa.gov/sbdb.cgi?sstr=3719998', u'absolute_magnitude_h': 20.4, u'estimated_diameter': 
{u'feet': {u'estimated_diameter_max': 1621.9035709942, u'estimated_diameter_min': 725.3373275385}, u'miles': {u'estimated_diameter_max': 0.3071786018, u'estimated_diameter_min': 0.137374447}, u'meters': {u'estimated_diameter_max': 494.3561926196, u'estimated_diameter_min': 221.0828103591}, u'kilometers': {u'estimated_diameter_max': 0.4943561926, u'estimated_diameter_min': 0.2210828104}}, u'close_approach_data': [{u'epoch_date_close_approach': 1441785960000, u'orbiting_body': u'Earth', u'close_approach_date': u'2015-09-09', u'relative_velocity': {u'kilometers_per_second': u'10.2523259364', u'miles_per_hour': u'22933.4278871121', u'kilometers_per_hour': u'36908.3733710062'}, u'miss_distance': {u'astronomical': u'0.2479056136', u'miles': u'23044266.1241125616', u'lunar': u'96.4352836904', u'kilometers': u'37086151.755603032'}, u'close_approach_date_full': u'2015-Sep-09 08:06'}], u'neo_reference_id': u'3719998', u'is_potentially_hazardous_asteroid': False, u'id': u'3719998', u'name': u'(2015 KN120)'}, {u'is_sentry_object': False, u'links': {u'self': u'http://www.neowsapp.com/rest/v1/neo/3727660?api_key=DEMO_KEY'}, u'nasa_jpl_url': u'http://ssd.jpl.nasa.gov/sbdb.cgi?sstr=3727660', u'absolute_magnitude_h': 24.1, u'estimated_diameter': {u'feet': {u'estimated_diameter_max': 295.1379320721, u'estimated_diameter_min': 131.9896957704}, u'miles': {u'estimated_diameter_max': 0.0558973165, u'estimated_diameter_min': 0.0249980399}, u'meters': {u'estimated_diameter_max': 89.9580388169, u'estimated_diameter_min': 40.2304579834}, u'kilometers': {u'estimated_diameter_max': 0.0899580388, u'estimated_diameter_min': 0.040230458}}, u'close_approach_data': [{u'epoch_date_close_approach': 1441800240000, u'orbiting_body': u'Earth', u'close_approach_date': u'2015-09-09', u'relative_velocity': {u'kilometers_per_second': u'4.1554986097', u'miles_per_hour': u'9295.4348400961', u'kilometers_per_hour': u'14959.7949950136'}, u'miss_distance': {u'astronomical': u'0.1990972664', u'miles': 
u'18507246.8706087184', u'lunar': u'77.4488366296', u'kilometers': u'29784526.976262568'}, u'close_approach_date_full': u'2015-Sep-09 12:04'}], u'neo_reference_id': u'3727660', u'is_potentially_hazardous_asteroid': False, u'id': u'3727660', u'name': u'(2015 RW83)'}, {u'is_sentry_object': False, u'links': {u'self': u'http://www.neowsapp.com/rest/v1/neo/3728374?api_key=DEMO_KEY'}, u'nasa_jpl_url': u'http://ssd.jpl.nasa.gov/sbdb.cgi?sstr=3728374', u'absolute_magnitude_h': 27.8, u'estimated_diameter': {u'feet': {u'estimated_diameter_max': 53.70627484, u'estimated_diameter_min': 24.0181762721}, u'miles': {u'estimated_diameter_max': 0.0101716395, u'estimated_diameter_min': 0.0045488955}, u'meters': {u'estimated_diameter_max': 16.3696720474, u'estimated_diameter_min': 7.3207398935}, u'kilometers': {u'estimated_diameter_max': 0.016369672, u'estimated_diameter_min': 0.0073207399}}, u'close_approach_data': [{u'epoch_date_close_approach': 1441773960000, u'orbiting_body': u'Earth', u'close_approach_date': u'2015-09-09', u'relative_velocity': {u'kilometers_per_second': u'18.4534006531', u'miles_per_hour': u'41278.4119209319', u'kilometers_per_hour': u'66432.2423511802'}, u'miss_distance': {u'astronomical': u'0.003635815', u'miles': u'337970.11379189', u'lunar': u'1.414332035', u'kilometers': u'543910.17971405'}, u'close_approach_date_full': u'2015-Sep-09 04:46'}], u'neo_reference_id': u'3728374', u'is_potentially_hazardous_asteroid': False, u'id': u'3728374', u'name': u'(2015 RU178)'}, {u'is_sentry_object': False, u'links': {u'self': u'http://www.neowsapp.com/rest/v1/neo/3771641?api_key=DEMO_KEY'}, u'nasa_jpl_url': u'http://ssd.jpl.nasa.gov/sbdb.cgi?sstr=3771641', u'absolute_magnitude_h': 25.9, u'estimated_diameter': {u'feet': {u'estimated_diameter_max': 128.8323800441, u'estimated_diameter_min': 57.6155918963}, u'miles': {u'estimated_diameter_max': 0.0244000636, u'estimated_diameter_min': 0.0109120402}, u'meters': {u'estimated_diameter_max': 39.2681081809, 
u'estimated_diameter_min': 17.561231848}, u'kilometers': {u'estimated_diameter_max': 0.0392681082, u'estimated_diameter_min': 0.0175612318}}, u'close_approach_data': [{u'epoch_date_close_approach': 1441795260000, u'orbiting_body': u'Earth', u'close_approach_date': u'2015-09-09', u'relative_velocity': {u'kilometers_per_second': u'4.6258305195', u'miles_per_hour': u'10347.5202891031', u'kilometers_per_hour': u'16652.989870254'}, u'miss_distance': {u'astronomical': u'0.0222398845', u'miles': u'2067326.389044407', u'lunar': u'8.6513150705', u'kilometers': u'3327039.350246015'}, u'close_approach_date_full': u'2015-Sep-09 10:41'}], u'neo_reference_id': u'3771641', u'is_potentially_hazardous_asteroid': False, u'id': u'3771641', u'name': u'(2017 FB3)'}, {u'is_sentry_object': False, u'links': {u'self': u'http://www.neowsapp.com/rest/v1/neo/3740494?api_key=DEMO_KEY'}, u'nasa_jpl_url': u'http://ssd.jpl.nasa.gov/sbdb.cgi?sstr=3740494', u'absolute_magnitude_h': 21.9, u'estimated_diameter': {u'feet': {u'estimated_diameter_max': 812.8773639568, u'estimated_diameter_min': 363.5298086356}, u'miles': {u'estimated_diameter_max': 0.1539539936, u'estimated_diameter_min': 0.068850319}, u'meters': {u'estimated_diameter_max': 247.7650126055, u'estimated_diameter_min': 110.8038821264}, u'kilometers': {u'estimated_diameter_max': 0.2477650126, u'estimated_diameter_min': 0.1108038821}}, u'close_approach_data': [{u'epoch_date_close_approach': 1441787100000, u'orbiting_body': u'Earth', u'close_approach_date': u'2015-09-09', u'relative_velocity': {u'kilometers_per_second': u'10.3183302881', u'miles_per_hour': u'23081.0730214138', u'kilometers_per_hour': u'37145.9890371002'}, u'miss_distance': {u'astronomical': u'0.3527410399', u'miles': u'32789327.6731826594', u'lunar': u'137.2162645211', u'kilometers': u'52769308.230625013'}, u'close_approach_date_full': u'2015-Sep-09 08:25'}], u'neo_reference_id': u'3740494', u'is_potentially_hazardous_asteroid': True, u'id': u'3740494', u'name': u'(2016 
AF193)'}, {u'is_sentry_object': False, u'links': {u'self': u'http://www.neowsapp.com/rest/v1/neo/3758926?api_key=DEMO_KEY'}, u'nasa_jpl_url': u'http://ssd.jpl.nasa.gov/sbdb.cgi?sstr=3758926', u'absolute_magnitude_h': 21.9, u'estimated_diameter': {u'feet': {u'estimated_diameter_max': 812.8773639568, u'estimated_diameter_min': 363.5298086356}, u'miles': {u'estimated_diameter_max': 0.1539539936, u'estimated_diameter_min': 0.068850319}, u'meters': {u'estimated_diameter_max': 247.7650126055, u'estimated_diameter_min': 110.8038821264}, u'kilometers': {u'estimated_diameter_max': 0.2477650126, u'estimated_diameter_min': 0.1108038821}}, u'close_approach_data': [{u'epoch_date_close_approach': 1441761240000, u'orbiting_body': u'Earth', u'close_approach_date': u'2015-09-09', u'relative_velocity': {u'kilometers_per_second': u'16.428556935', u'miles_per_hour': u'36749.0390078261', u'kilometers_per_hour': u'59142.8049658787'}, u'miss_distance': {u'astronomical': u'0.2718402634', u'miles': u'25269130.7875993004', u'lunar': u'105.7458624626', u'kilometers': u'40666724.384878958'}, u'close_approach_date_full': u'2015-Sep-09 01:14'}], u'neo_reference_id': u'3758926', u'is_potentially_hazardous_asteroid': True, u'id': u'3758926', u'name': u'(2016 RT1)'}, {u'is_sentry_object': False, u'links': {u'self': u'http://www.neowsapp.com/rest/v1/neo/3773922?api_key=DEMO_KEY'}, u'nasa_jpl_url': u'http://ssd.jpl.nasa.gov/sbdb.cgi?sstr=3773922', u'absolute_magnitude_h': 21.1, u'estimated_diameter': {u'feet': {u'estimated_diameter_max': 1174.9652706022, u'estimated_diameter_min': 525.4604432536}, u'miles': {u'estimated_diameter_max': 0.2225312253, u'estimated_diameter_min': 0.0995189894}, u'meters': {u'estimated_diameter_max': 358.1294030194, u'estimated_diameter_min': 160.1603379786}, u'kilometers': {u'estimated_diameter_max': 0.358129403, u'estimated_diameter_min': 0.160160338}}, u'close_approach_data': [{u'epoch_date_close_approach': 1441777920000, u'orbiting_body': u'Earth', 
u'close_approach_date': u'2015-09-09', u'relative_velocity': {u'kilometers_per_second': u'16.7035418692', u'miles_per_hour': u'37364.1528071428', u'kilometers_per_hour': u'60132.7507290062'}, u'miss_distance': {u'astronomical': u'0.3204590906', u'miles': u'29788533.0567500236', u'lunar': u'124.6585862434', u'kilometers': u'47939997.375897022'}, u'close_approach_date_full': u'2015-Sep-09 05:52'}], u'neo_reference_id': u'3773922', u'is_potentially_hazardous_asteroid': False, u'id': u'3773922', u'name': u'(2017 GW6)'}, {u'is_sentry_object': False, u'links': {u'self': u'http://www.neowsapp.com/rest/v1/neo/2537342?api_key=DEMO_KEY'}, u'nasa_jpl_url': u'http://ssd.jpl.nasa.gov/sbdb.cgi?sstr=2537342', u'absolute_magnitude_h': 20.4, u'estimated_diameter': {u'feet': {u'estimated_diameter_max': 1621.9035709942, u'estimated_diameter_min': 725.3373275385}, u'miles': {u'estimated_diameter_max': 0.3071786018, u'estimated_diameter_min': 0.137374447}, u'meters': {u'estimated_diameter_max': 494.3561926196, u'estimated_diameter_min': 221.0828103591}, u'kilometers': {u'estimated_diameter_max': 0.4943561926, u'estimated_diameter_min': 0.2210828104}}, u'close_approach_data': [{u'epoch_date_close_approach': 1441785960000, u'orbiting_body': u'Earth', u'close_approach_date': u'2015-09-09', u'relative_velocity': {u'kilometers_per_second': u'10.252325974', u'miles_per_hour': u'22933.4279711959', u'kilometers_per_hour': u'36908.373506328'}, u'miss_distance': {u'astronomical': u'0.2479056215', u'miles': u'23044266.858463429', u'lunar': u'96.4352867635', u'kilometers': u'37086152.937426205'}, u'close_approach_date_full': u'2015-Sep-09 08:06'}], u'neo_reference_id': u'2537342', u'is_potentially_hazardous_asteroid': False, u'id': u'2537342', u'name': u'537342 (2015 KN120)'}]}, u'element_count': 11, u'links': {u'self': u'http://www.neowsapp.com/rest/v1/feed?start_date=2015-09-09&end_date=2015-09-09&detailed=false&api_key=DEMO_KEY', u'prev': 
u'http://www.neowsapp.com/rest/v1/feed?start_date=2015-09-08&end_date=2015-09-08&detailed=false&api_key=DEMO_KEY', u'next': u'http://www.neowsapp.com/rest/v1/feed?start_date=2015-09-10&end_date=2015-09-10&detailed=false&api_key=DEMO_KEY'}}
| 14,102
| 14,102
| 0.76124
| 2,326
| 14,102
| 4.377042
| 0.159931
| 0.09724
| 0.175032
| 0.090757
| 0.707789
| 0.628229
| 0.594441
| 0.516943
| 0.516943
| 0.4033
| 0
| 0.217031
| 0.051482
| 14,102
| 1
| 14,102
| 14,102
| 0.544109
| 0
| 0
| 0
| 0
| 3
| 0.662129
| 0.217613
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5d1901f626d4d4ee3fc83cc9f0dd740cacd74575
| 14,652
|
py
|
Python
|
woocommerce/woocommerce/doctype/woo_setting/woo_setting.py
|
supporterpsonic/woocommerce
|
67d1e2f6d430ed292b4e7034789a235908d2739c
|
[
"MIT"
] | null | null | null |
woocommerce/woocommerce/doctype/woo_setting/woo_setting.py
|
supporterpsonic/woocommerce
|
67d1e2f6d430ed292b4e7034789a235908d2739c
|
[
"MIT"
] | null | null | null |
woocommerce/woocommerce/doctype/woo_setting/woo_setting.py
|
supporterpsonic/woocommerce
|
67d1e2f6d430ed292b4e7034789a235908d2739c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2017, WooCommerce and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
import json
from woocommerce import API
class WooSetting(Document):
    """Frappe DocType controller for "Woo Setting"; no custom behaviour."""
    pass
@frappe.whitelist()
def get_data_woo_setting():
    """Return every "Woo Setting" row as a list of lists.

    Each row is ``[name, url, consumer_key, consumer_secret,
    warehouse_item, price_list_item]``.  Returns ``None`` when no
    settings exist (preserving the original implicit-return behaviour).
    """
    rows = frappe.db.sql(
        """
        SELECT
            ws.`name`,
            ws.`url`,
            ws.`consumer_key`,
            ws.`consumer_secret`,
            ws.`warehouse_item`,
            ws.`price_list_item`
        FROM `tabWoo Setting` ws
        """,
        as_list=1)
    # Empty result -> None, replacing the redundant `if/else: return`.
    return rows or None
@frappe.whitelist()
def connect_woo_api_v2(url, consumer_key, consumer_secret):
    """Build and return a WooCommerce REST ``API`` client (wp-json, wc/v1)."""
    return API(
        url=url,
        consumer_key=consumer_key,
        consumer_secret=consumer_secret,
        wp_api=True,
        version="wc/v1",
    )
# ================================ PRODUCT ========================================== #
@frappe.whitelist()
def get_id_by_sku(sku, url, consumer_key, consumer_secret):
    """Look up a WooCommerce product id by its SKU.

    Returns the product id of the first product whose ``sku`` matches,
    or the sentinel string ``"tidak_ada"`` ("not found") otherwise.
    """
    client = connect_woo_api_v2(url, consumer_key, consumer_secret)
    products = client.get("products").json()
    for product in products or []:
        if product["sku"] == sku:
            return product["id"]
    return "tidak_ada"
@frappe.whitelist()
def membuat_atau_update_item_baru(doc, method):
    """Create or update the WooCommerce product matching an Item doc.

    Item save hook: for every configured Woo Setting, create the product
    when its SKU is unknown, otherwise update it and set its status from
    ``doc.disabled`` (0 -> "publish", 1 -> "pending").  Only sales items
    (``doc.is_sales_item == 1``) are synchronised, matching the original
    branch conditions.
    """
    settings = get_data_woo_setting()
    if not settings:
        return
    if doc.is_sales_item != 1:
        # Every branch of the original required is_sales_item == 1.
        return
    for row in settings:
        url, key, secret = str(row[1]), str(row[2]), str(row[3])
        wcapi = connect_woo_api_v2(url, key, secret)
        product_id = str(get_id_by_sku(doc.item_code, url, key, secret))
        # Shared payload; previously duplicated three times.
        data = {
            "name": doc.item_name,
            "type": "simple",
            "description": doc.description,
            "short_description": doc.description,
            "sku": doc.item_code,
            "manage_stock": True,
        }
        if product_id == "tidak_ada":
            # Unknown SKU: create a new product (no status field, as before).
            wcapi.post("products", data)
        else:
            if doc.disabled == 0:
                data["status"] = "publish"
            elif doc.disabled == 1:
                data["status"] = "pending"
            else:
                # Original code did nothing for other `disabled` values.
                continue
            wcapi.put("products/" + str(product_id), data)
@frappe.whitelist()
def hapus_item_woocommerce(doc, method):
    """Delete the matching WooCommerce product when an Item is removed.

    Item delete hook: for every configured Woo Setting, look the product
    up by SKU and force-delete it, then notify the user.  The original
    dead ``count`` variable is removed.
    """
    settings = get_data_woo_setting()
    if not settings:
        return
    for row in settings:
        url, key, secret = str(row[1]), str(row[2]), str(row[3])
        wcapi = connect_woo_api_v2(url, key, secret)
        product_id = str(get_id_by_sku(doc.item_code, url, key, secret))
        if product_id != "tidak_ada":
            # force=true skips the WooCommerce trash and deletes permanently.
            wcapi.delete("products/" + product_id + "?force=true")
            frappe.msgprint(
                "Item juga sudah terhapus dari dalam WooCommerce " + str(row[0]))
@frappe.whitelist()
def add_edit_item_price(doc, method):
    """Push an Item Price's rate to the matching WooCommerce product.

    Item Price save hook: for every Woo Setting whose ``price_list_item``
    equals the document's price list, set the product's ``price`` and
    ``regular_price`` (looked up by SKU).
    """
    # Parameterised query instead of str.format() to prevent SQL injection.
    url_woo = frappe.db.sql(
        """
        SELECT
            ws.`name`,
            ws.`url`,
            ws.`consumer_key`,
            ws.`consumer_secret`,
            ws.`warehouse_item`,
            ws.`price_list_item`
        FROM `tabWoo Setting` ws
        WHERE ws.`price_list_item` = %s
        """,
        (doc.price_list,), as_list=1)
    if not url_woo:
        return
    for row in url_woo:
        url, key, secret = str(row[1]), str(row[2]), str(row[3])
        wcapi = connect_woo_api_v2(url, key, secret)
        product_id = str(get_id_by_sku(doc.item_code, url, key, secret))
        if product_id != "tidak_ada":
            data = {
                "price": str(doc.price_list_rate),
                "regular_price": str(doc.price_list_rate),
            }
            wcapi.put("products/" + str(product_id), data)
@frappe.whitelist()
def delete_item_price(doc, method):
    """Zero the WooCommerce price when an Item Price is deleted.

    Item Price delete hook: for every Woo Setting whose
    ``price_list_item`` matches, set the product's ``price`` and
    ``regular_price`` to "0" (looked up by SKU).
    """
    # Parameterised query instead of str.format() to prevent SQL injection.
    url_woo = frappe.db.sql(
        """
        SELECT
            ws.`name`,
            ws.`url`,
            ws.`consumer_key`,
            ws.`consumer_secret`,
            ws.`warehouse_item`,
            ws.`price_list_item`
        FROM `tabWoo Setting` ws
        WHERE ws.`price_list_item` = %s
        """,
        (doc.price_list,), as_list=1)
    if not url_woo:
        return
    for row in url_woo:
        url, key, secret = str(row[1]), str(row[2]), str(row[3])
        wcapi = connect_woo_api_v2(url, key, secret)
        product_id = str(get_id_by_sku(doc.item_code, url, key, secret))
        if product_id != "tidak_ada":
            data = {
                "price": "0",
                "regular_price": "0",
            }
            wcapi.put("products/" + str(product_id), data)
# ================================= END PRODUCT ========================================== #
# ================================== STOCK PRODUCT ========================================== #
# submit stock entry
def _sync_warehouse_stock(item_code, warehouse):
    """Push the Bin qty for (item_code, warehouse) to every matching Woo Setting.

    For each Woo Setting whose ``warehouse_item`` equals *warehouse*, the
    WooCommerce product with the item's SKU gets ``stock_quantity`` set to
    the Bin's actual qty, or 0 when no Bin row exists.
    """
    # Parameterised queries instead of str.format() to prevent SQL injection.
    settings = frappe.db.sql(
        """
        SELECT
            ws.`name`,
            ws.`url`,
            ws.`consumer_key`,
            ws.`consumer_secret`,
            ws.`warehouse_item`,
            ws.`price_list_item`
        FROM `tabWoo Setting` ws
        WHERE ws.`warehouse_item` = %s
        """,
        (warehouse,), as_list=1)
    for row in settings or []:
        bin_rows = frappe.db.sql(
            """
            SELECT
                b.`item_code`,
                b.`warehouse`,
                b.`actual_qty`
            FROM `tabBin` b
            WHERE b.`item_code` = %s
              AND b.`warehouse` = %s
            """,
            (item_code, warehouse), as_list=1)
        url, key, secret = str(row[1]), str(row[2]), str(row[3])
        wcapi = connect_woo_api_v2(url, key, secret)
        product_id = str(get_id_by_sku(item_code, url, key, secret))
        qty = float(bin_rows[0][2]) if bin_rows else 0
        wcapi.put("products/" + str(product_id), {"stock_quantity": qty})


@frappe.whitelist()
def submit_cancel_document_stock_entry(doc, method):
    """Sync WooCommerce stock after a Stock Entry submit/cancel.

    Material Receipt syncs the target warehouse, Material Issue the
    source warehouse, Material Transfer both.  BUG FIX: the original
    Material Transfer branch iterated the *target*-warehouse settings
    (``url_woo_t``) when syncing the source warehouse; each warehouse is
    now synced against its own settings.  The six copies of the sync
    logic are collapsed into ``_sync_warehouse_stock``.
    """
    if doc.purpose == "Material Receipt":
        for item in doc.items:
            _sync_warehouse_stock(item.item_code, item.t_warehouse)
    elif doc.purpose == "Material Issue":
        for item in doc.items:
            _sync_warehouse_stock(item.item_code, item.s_warehouse)
    elif doc.purpose == "Material Transfer":
        for item in doc.items:
            _sync_warehouse_stock(item.item_code, item.t_warehouse)
            _sync_warehouse_stock(item.item_code, item.s_warehouse)
# submit purchase receipt
@frappe.whitelist()
def submit_cancel_document_purchase_receipt(doc, method):
    """Sync WooCommerce stock after a Purchase Receipt submit/cancel.

    For every received item, each Woo Setting whose ``warehouse_item``
    matches the item's warehouse gets the product's ``stock_quantity``
    set to the Bin's actual qty (0 when no Bin row exists).
    """
    for item in doc.items:
        # Parameterised query instead of str.format() (SQL injection risk).
        settings = frappe.db.sql(
            """
            SELECT
                ws.`name`,
                ws.`url`,
                ws.`consumer_key`,
                ws.`consumer_secret`,
                ws.`warehouse_item`,
                ws.`price_list_item`
            FROM `tabWoo Setting` ws
            WHERE ws.`warehouse_item` = %s
            """,
            (item.warehouse,), as_list=1)
        for row in settings or []:
            bin_rows = frappe.db.sql(
                """
                SELECT
                    b.`item_code`,
                    b.`warehouse`,
                    b.`actual_qty`
                FROM `tabBin` b
                WHERE b.`item_code` = %s
                  AND b.`warehouse` = %s
                """,
                (item.item_code, item.warehouse), as_list=1)
            url, key, secret = str(row[1]), str(row[2]), str(row[3])
            wcapi = connect_woo_api_v2(url, key, secret)
            product_id = str(get_id_by_sku(item.item_code, url, key, secret))
            qty = float(bin_rows[0][2]) if bin_rows else 0
            wcapi.put("products/" + str(product_id), {"stock_quantity": qty})
# submit stock reconcilliation
@frappe.whitelist()
def submit_cancel_document_stock_reconcilliation(doc, method):
    """Sync WooCommerce stock after a Stock Reconciliation submit/cancel.

    For every reconciled item, each Woo Setting whose ``warehouse_item``
    matches the item's warehouse gets the product's ``stock_quantity``
    set to the Bin's actual qty (0 when no Bin row exists).
    """
    for item in doc.items:
        # Parameterised query instead of str.format() (SQL injection risk).
        settings = frappe.db.sql(
            """
            SELECT
                ws.`name`,
                ws.`url`,
                ws.`consumer_key`,
                ws.`consumer_secret`,
                ws.`warehouse_item`,
                ws.`price_list_item`
            FROM `tabWoo Setting` ws
            WHERE ws.`warehouse_item` = %s
            """,
            (item.warehouse,), as_list=1)
        for row in settings or []:
            bin_rows = frappe.db.sql(
                """
                SELECT
                    b.`item_code`,
                    b.`warehouse`,
                    b.`actual_qty`
                FROM `tabBin` b
                WHERE b.`item_code` = %s
                  AND b.`warehouse` = %s
                """,
                (item.item_code, item.warehouse), as_list=1)
            url, key, secret = str(row[1]), str(row[2]), str(row[3])
            wcapi = connect_woo_api_v2(url, key, secret)
            product_id = str(get_id_by_sku(item.item_code, url, key, secret))
            qty = float(bin_rows[0][2]) if bin_rows else 0
            wcapi.put("products/" + str(product_id), {"stock_quantity": qty})
# ================================== END STOCK PRODUCT ========================================== #
| 27.083179
| 99
| 0.610906
| 2,163
| 14,652
| 3.917245
| 0.06565
| 0.03399
| 0.014163
| 0.02266
| 0.874071
| 0.863331
| 0.84787
| 0.83772
| 0.83772
| 0.812935
| 0
| 0.016646
| 0.220994
| 14,652
| 541
| 99
| 27.083179
| 0.725688
| 0.180726
| 0
| 0.793194
| 0
| 0
| 0.294073
| 0.015821
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026178
| false
| 0.002618
| 0.013089
| 0
| 0.052356
| 0.002618
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5d39cd19ad53569418af45e1d523af8ba2d2f416
| 118
|
py
|
Python
|
left_break/views.py
|
sebleier/Left-Break
|
689a9a8486c4696a45df58c88e1b6473dd145ba1
|
[
"BSD-3-Clause"
] | 1
|
2019-06-13T16:18:49.000Z
|
2019-06-13T16:18:49.000Z
|
left_break/views.py
|
sebleier/Left-Break
|
689a9a8486c4696a45df58c88e1b6473dd145ba1
|
[
"BSD-3-Clause"
] | null | null | null |
left_break/views.py
|
sebleier/Left-Break
|
689a9a8486c4696a45df58c88e1b6473dd145ba1
|
[
"BSD-3-Clause"
] | null | null | null |
from django.shortcuts import render_to_response
def homepage(request):
    """Serve the site homepage from a static template (no context data)."""
    template_name = "homepage.html"
    return render_to_response(template_name)
| 29.5
| 47
| 0.822034
| 16
| 118
| 5.8125
| 0.75
| 0.172043
| 0.344086
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101695
| 118
| 4
| 48
| 29.5
| 0.877358
| 0
| 0
| 0
| 0
| 0
| 0.109244
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
5398769d85ac2eaa8a2924d26f0e9e073944ee38
| 107
|
py
|
Python
|
src/flaskdoc/pallets/__init__.py
|
kulgan/flaskdoc
|
e61fcbc246bcc3695c0e7dcb474067d47a6d70f0
|
[
"Apache-2.0"
] | 4
|
2020-08-17T03:07:26.000Z
|
2021-06-24T13:01:56.000Z
|
src/flaskdoc/pallets/__init__.py
|
kulgan/flaskdoc
|
e61fcbc246bcc3695c0e7dcb474067d47a6d70f0
|
[
"Apache-2.0"
] | 14
|
2019-10-09T13:50:43.000Z
|
2020-08-17T02:35:55.000Z
|
src/flaskdoc/pallets/__init__.py
|
kulgan/flaskdoc
|
e61fcbc246bcc3695c0e7dcb474067d47a6d70f0
|
[
"Apache-2.0"
] | 2
|
2020-08-09T06:10:24.000Z
|
2022-03-06T11:23:30.000Z
|
from flaskdoc.pallets.app import Flask, register_openapi
from flaskdoc.pallets.blueprints import Blueprint
| 35.666667
| 56
| 0.869159
| 14
| 107
| 6.571429
| 0.714286
| 0.26087
| 0.413043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084112
| 107
| 2
| 57
| 53.5
| 0.938776
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 8
|
539f1dc1fb617f6c27fd6dabd843726ea8651a5c
| 6,269
|
py
|
Python
|
Tests/Manual Pytest/ManualPytest.py
|
Sports-and-Business/Minecraft
|
65c2e04eb0403206f62d5b4367611fe0601b1bf4
|
[
"MIT"
] | 1
|
2021-09-26T18:33:49.000Z
|
2021-09-26T18:33:49.000Z
|
Tests/Manual Pytest/ManualPytest.py
|
Sports-and-Business/Minecraft
|
65c2e04eb0403206f62d5b4367611fe0601b1bf4
|
[
"MIT"
] | null | null | null |
Tests/Manual Pytest/ManualPytest.py
|
Sports-and-Business/Minecraft
|
65c2e04eb0403206f62d5b4367611fe0601b1bf4
|
[
"MIT"
] | 1
|
2019-03-14T16:26:02.000Z
|
2019-03-14T16:26:02.000Z
|
import pytest
from main import cube_vertices
from main import tex_coord
from main import Model
from main import normalize
# Module-level Model instance created at import time.
# NOTE(review): `m` is not referenced by the checks below — confirm whether it
# is needed (e.g. for Model.__init__ side effects) or can be removed.
m = Model()
# Test the vertices and bounds, why is this useful?
class mainTests():
    """Sanity checks for cube_vertices, tex_coord and normalize from main.

    NOTE(review): these asserts live in a class *body*, so they all execute
    once at import time; pytest will not collect or report them as individual
    tests. Wrapping them in ``test_*`` functions would be the conventional
    fix. Left unchanged here to preserve behavior.
    """
    position = (10,10,10)
    assert normalize(position) == (10,10,10)
    assert normalize(position) != (11,9,10)
    position = (1,-1,100)
    assert normalize(position) == (1,-1,100)
    assert normalize(position) != (1,1,100)
    #test that the cube is defined correctly
    #x,y,z,n
    #x-n,y+n,z-n, x-n,y+n,z+n, x+n,y+n,z+n, x+n,y+n,z-n,  # top
    #x-n,y-n,z-n, x+n,y-n,z-n, x+n,y-n,z+n, x-n,y-n,z+n,  # bottom
    #x-n,y-n,z-n, x-n,y-n,z+n, x-n,y+n,z+n, x-n,y+n,z-n,  # left
    #x+n,y-n,z+n, x+n,y-n,z-n, x+n,y+n,z-n, x+n,y+n,z+n,  # right
    #x-n,y-n,z+n, x+n,y-n,z+n, x+n,y+n,z+n, x-n,y+n,z+n,  # front
    #x+n,y-n,z-n, x-n,y-n,z-n, x-n,y+n,z-n, x+n,y+n,z-n,  # back
    assert cube_vertices(1,1,1,1) == [0, 2, 0, 0, 2, 2, 2, 2, 2, 2, 2, 0,
                                      0, 0, 0, 2, 0, 0, 2, 0, 2, 0, 0, 2,
                                      0, 0, 0, 0, 0, 2, 0, 2, 2, 0, 2, 0,
                                      2, 0, 2, 2, 0, 0, 2, 2, 0, 2, 2, 2,
                                      0, 0, 2, 2, 0, 2, 2, 2, 2, 0, 2, 2,
                                      2, 0, 0, 0, 0, 0, 0, 2, 0, 2, 2, 0]
    assert cube_vertices(1,0,0,1) != [0, 2, 0, 0, 2, 2, 2, 2, 2, 2, 2, 0,
                                      0, 0, 0, 2, 0, 0, 2, 0, 2, 0, 0, 2,
                                      0, 0, 0, 0, 0, 2, 0, 2, 2, 0, 2, 0,
                                      2, 0, 2, 2, 0, 0, 2, 2, 0, 2, 2, 2,
                                      0, 0, 2, 2, 0, 2, 2, 2, 2, 0, 2, 2,
                                      2, 0, 0, 0, 0, 0, 0, 2, 0, 2, 2, 0]
    assert cube_vertices(1,1,1,0) != [0, 2, 0, 0, 2, 2, 2, 2, 2, 2, 2, 0,
                                      0, 0, 0, 2, 0, 0, 2, 0, 2, 0, 0, 2,
                                      0, 0, 0, 0, 0, 2, 0, 2, 2, 0, 2, 0,
                                      2, 0, 2, 2, 0, 0, 2, 2, 0, 2, 2, 2,
                                      0, 0, 2, 2, 0, 2, 2, 2, 2, 0, 2, 2,
                                      2, 0, 0, 0, 0, 0, 0, 2, 0, 2, 2, 0]
    #dx, dy, dx + m, dy, dx + m, dy + m, dx, dy + m
    #Test that bounds of the square are correct
    assert tex_coord(1,1, n=4) == (.25, .25, .5, .25, .5, .5, .25, .5)
    assert tex_coord(2,1, n=4) != (.25, .25, .5, .25, .5, .5, .25, .5)
    assert tex_coord(1,2, n=4) != (.25, .25, .5, .25, .5, .5, .25, .5)
    assert tex_coord(2,1, n=4) == (.5, .25, .75, .25, .75, .5, .5, .5)
    assert tex_coord(8,2, n=4) != (.25, .25, .5, .25, .5, .5, .25, .5)
    assert tex_coord(2,24, n=4) != (.5, .25, .75, .25, .75, .5, .5, .5)
    assert tex_coord(1,1, n=4) != (1, 1, 2, 1, 2, 2, 1, 2)
    assert tex_coord(3,2, n=4) == (.75, .5, 1, .5, 1, .75, .75, .75)
    assert tex_coord(3,-2, n=4) == (.75, -.5, 1, -.5, 1, -.25, .75, -.25)
    assert tex_coord(-3,-2, n=4) != (.75, -.5, 1, -.5, .1, -.25, .75, -.25)
    assert tex_coord(8,8, n=4) != (2, 2, 2.25, 2, 2.25, 2.25, 2, 2.26)
    assert tex_coord(8,8, n=4) == (2, 2, 2.25, 2, 2.25, 2.25, 2, 2.25)
    position = (1,0,8)
    assert normalize(position) == (1,0,8)
    assert normalize(position) != (1,0,8.25)
    position = (-.25,-1,12)
    assert normalize(position) != (-.25,-1,12)
    assert normalize(position) != (.25,1,1)
    #x,y,z,n
    #x-n,y+n,z-n, x-n,y+n,z+n, x+n,y+n,z+n, x+n,y+n,z-n,  # top
    #x-n,y-n,z-n, x+n,y-n,z-n, x+n,y-n,z+n, x-n,y-n,z+n,  # bottom
    #x-n,y-n,z-n, x-n,y-n,z+n, x-n,y+n,z+n, x-n,y+n,z-n,  # left
    #x+n,y-n,z+n, x+n,y-n,z-n, x+n,y+n,z-n, x+n,y+n,z+n,  # right
    #x-n,y-n,z+n, x+n,y-n,z+n, x+n,y+n,z+n, x-n,y+n,z+n,  # front
    #x+n,y-n,z-n, x-n,y-n,z-n, x-n,y+n,z-n, x+n,y+n,z-n,  # back
    assert cube_vertices(0,0,0,0) == [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    assert cube_vertices(2,3,4,5) == [-3, 8, -1, -3, 8, 9, 7, 8, 9, 7, 8, -1,
                                      -3, -2, -1, 7, -2, -1, 7, -2, 9, -3, -2, 9,
                                      -3, -2, -1, -3, -2, 9, -3, 8, 9, -3, 8, -1,
                                      7, -2, 9, 7, -2, -1, 7, 8, -1, 7, 8, 9,
                                      -3, -2, 9, 7, -2, 9, 7, 8, 9, -3, 8, 9,
                                      7, -2, -1, -3, -2, -1, -3, 8, -1, 7, 8, -1]
    assert cube_vertices(5,0,0,0) != [0, 2, 0, 0, 2, 2, 2, 2, 2, 2, 2, 0,
                                      0, 0, 0, 2, 0, 0, 2, 0, 2, 0, 0, 2,
                                      0, 0, 0, 0, 0, 2, 0, 2, 2, 0, 2, 0,
                                      2, 0, 2, 2, 0, 0, 2, 2, 0, 2, 2, 2,
                                      0, 0, 2, 2, 0, 2, 2, 2, 2, 0, 2, 2,
                                      2, 0, 0, 0, 0, 0, 0, 2, 0, 2, 2, 0]
    assert cube_vertices(-1,1,1,1) != [0, 2, 0, 0, 2, 2, 2, 2, 2, 2, 2, 0,
                                       0, 0, 0, 2, 0, 0, 2, 0, 2, 0, 0, 2,
                                       0, 0, 0, 0, 0, 2, 0, 2, 2, 0, 2, 0,
                                       2, 0, 2, 2, 0, 0, 2, 2, 0, 2, 2, 2,
                                       0, 0, 2, 2, 0, 2, 2, 2, 2, 0, 2, 2,
                                       2, 0, 0, 0, 0, 0, 0, 2, 0, 2, 2, 0]
    assert cube_vertices(2,3,4,-5) != [-3, 8, -1, -3, 8, 9, 7, 8, 9, 7, 8, -1,
                                       -3, -2, -1, 7, -2, -1, 7, -2, 9, -3, -2, 9,
                                       -3, -2, -1, -3, -2, 9, -3, 8, 9, -3, 8, -1,
                                       7, -2, 9, 7, -2, -1, 7, 8, -1, 7, 8, 9,
                                       -3, -2, 9, 7, -2, 9, 7, 8, 9, -3, 8, 9,
                                       7, -2, -1, -3, -2, -1, -3, 8, -1, 7, 8, -1]
| 55.973214
| 82
| 0.320944
| 1,253
| 6,269
| 1.588188
| 0.047885
| 0.165829
| 0.182412
| 0.209045
| 0.850251
| 0.848744
| 0.848744
| 0.80603
| 0.805025
| 0.771357
| 0
| 0.247021
| 0.451109
| 6,269
| 111
| 83
| 56.477477
| 0.331299
| 0.142447
| 0
| 0.493671
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.35443
| 1
| 0
| false
| 0
| 0.063291
| 0
| 0.126582
| 0
| 0
| 0
| 1
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
53ae75d8db27b39f4426ef8b7b7b944a9d60ebd6
| 212
|
py
|
Python
|
pygar_me/exceptions.py
|
mbodock/pygar-me
|
22e51fa5e490c70527c540ef65479a1bacd548ee
|
[
"MIT"
] | 1
|
2015-08-25T20:34:28.000Z
|
2015-08-25T20:34:28.000Z
|
pygar_me/exceptions.py
|
mbodock/pygar-me
|
22e51fa5e490c70527c540ef65479a1bacd548ee
|
[
"MIT"
] | null | null | null |
pygar_me/exceptions.py
|
mbodock/pygar-me
|
22e51fa5e490c70527c540ef65479a1bacd548ee
|
[
"MIT"
] | null | null | null |
# encoding: utf-8


class PygarmeApiError(Exception):
    """Raised for API-level failures (generic client error)."""
    pass


class PygarmeTransactionApiError(Exception):
    """Raised when a transaction-related API call fails."""
    pass


class PygarmeTransactionError(Exception):
    """Base class for transaction-state errors."""
    pass


class NotPaidException(PygarmeTransactionError):
    """Raised when a transaction has not been paid."""
    pass
| 21.2
| 53
| 0.839623
| 19
| 212
| 9.368421
| 0.526316
| 0.219101
| 0.303371
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005181
| 0.089623
| 212
| 9
| 54
| 23.555556
| 0.917098
| 0.070755
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
9908fa29bdf21e6214766175b2dee7945e91a087
| 1,583
|
py
|
Python
|
repos/system_upgrade/el8toel9/actors/firewalldcheckallowzonedrifting/tests/component_test_firewalldcheckallowzonedrifting.py
|
sm00th/leapp-repository
|
1c171ec3a5f9260a3c6f84a9b15cad78a875ac61
|
[
"Apache-2.0"
] | null | null | null |
repos/system_upgrade/el8toel9/actors/firewalldcheckallowzonedrifting/tests/component_test_firewalldcheckallowzonedrifting.py
|
sm00th/leapp-repository
|
1c171ec3a5f9260a3c6f84a9b15cad78a875ac61
|
[
"Apache-2.0"
] | 1
|
2022-03-07T15:34:11.000Z
|
2022-03-07T15:35:15.000Z
|
repos/system_upgrade/el8toel9/actors/firewalldcheckallowzonedrifting/tests/component_test_firewalldcheckallowzonedrifting.py
|
sm00th/leapp-repository
|
1c171ec3a5f9260a3c6f84a9b15cad78a875ac61
|
[
"Apache-2.0"
] | null | null | null |
from leapp.models import FirewalldGlobalConfig, FirewallsFacts, FirewallStatus
from leapp.reporting import Report
def test_actor_firewalldcheckallowzonedrifting(current_actor_context):
    """A Report is produced when firewalld is enabled and AllowZoneDrifting is on."""
    fw_state = FirewallStatus(enabled=True, active=True)
    facts = FirewallsFacts(firewalld=fw_state, iptables=fw_state, ip6tables=fw_state)
    current_actor_context.feed(facts)
    current_actor_context.feed(FirewalldGlobalConfig(allowzonedrifting=True))
    current_actor_context.run()
    assert current_actor_context.consume(Report)
def test_actor_firewalldcheckallowzonedrifting_negative(current_actor_context):
    """No Report when firewalld is disabled, even with AllowZoneDrifting on."""
    fw_state = FirewallStatus(enabled=False, active=True)
    facts = FirewallsFacts(firewalld=fw_state, iptables=fw_state, ip6tables=fw_state)
    current_actor_context.feed(facts)
    current_actor_context.feed(FirewalldGlobalConfig(allowzonedrifting=True))
    current_actor_context.run()
    assert not current_actor_context.consume(Report)
def test_actor_firewalldcheckallowzonedrifting_negative2(current_actor_context):
    """No Report when AllowZoneDrifting is off, even with firewalld enabled."""
    fw_state = FirewallStatus(enabled=True, active=True)
    facts = FirewallsFacts(firewalld=fw_state, iptables=fw_state, ip6tables=fw_state)
    current_actor_context.feed(facts)
    current_actor_context.feed(FirewalldGlobalConfig(allowzonedrifting=False))
    current_actor_context.run()
    assert not current_actor_context.consume(Report)
| 43.972222
| 80
| 0.688566
| 139
| 1,583
| 7.568345
| 0.223022
| 0.171103
| 0.270913
| 0.131179
| 0.884981
| 0.838403
| 0.794677
| 0.794677
| 0.794677
| 0.68251
| 0
| 0.00337
| 0.250158
| 1,583
| 35
| 81
| 45.228571
| 0.882898
| 0
| 0
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 1
| 0.115385
| false
| 0
| 0.076923
| 0
| 0.192308
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
54cde72c2a05acd9f72f33eb05f7ac3c64471f58
| 25,198
|
py
|
Python
|
AI.py
|
DanielPri/XrudderAI
|
5c3e690c24511c05a205387431896cb5880e4b33
|
[
"MIT"
] | null | null | null |
AI.py
|
DanielPri/XrudderAI
|
5c3e690c24511c05a205387431896cb5880e4b33
|
[
"MIT"
] | null | null | null |
AI.py
|
DanielPri/XrudderAI
|
5c3e690c24511c05a205387431896cb5880e4b33
|
[
"MIT"
] | 1
|
2021-03-24T02:50:45.000Z
|
2021-03-24T02:50:45.000Z
|
from Board import Board
from Player import Player
from TileColor import TileColor
import copy
import random
class AI(Player):
    """Minimax-driven computer player.

    Searches deep-copied "imaginary" boards up to ``max_depth`` plies,
    scoring leaves with ``get_board_heuristic`` and collecting the
    equally-best destination positions (strings like ``"B3"``) in
    ``self.best_moves``.
    """

    def __init__(self, board, name, color, opponent_color):
        """Create the AI player and remember the opponent's tile color."""
        super().__init__(board, name, color)
        self.opponent_color = opponent_color
        self.max_depth = 3  # plies explored by mini_max before scoring a leaf
        self.best_moves = []  # positions tied for the best heuristic found so far

    # ------------------------------------------------README--------------------------------------------------------
    # how this AI will work:
    # Decide next move creates a board with a token in first unoccupied tile
    # it will do it for whoever's turn it is, and will do a MIN or MAX accordingly
    # it runs heuristic_for_board on that board
    # it remembers the heuristic value for that board in current_heuristic_value
    # it loops: makes a board with a token in the second unoccupied tile
    # goes through the same steps, replacing previous current_heuristic_value if better are found
    # it then does that again by iterating through all its placed tokens (if there are any)
    # and try all the possible moves for those tokens and see their heuristic values
    # (remember, moves are different from placing)
    # at the end, the current_heuristic_value with the best value will be the one used to create the move

    def play_turn(self, moves):
        """Run minimax on a copy of the board, then play one best move at random.

        ``moves`` is accepted for interface compatibility but is not used here.
        Raises IndexError (via random.choice) if the search left best_moves empty.
        """
        self.best_moves = []  # reset the best moves
        imaginary_board = copy.deepcopy(self.board)
        # Start at depth 1 as the minimizing player (which plays self.color below).
        self.mini_max(imaginary_board, 1, False)
        position = random.choice(self.best_moves)
        self.play_tile(self.select_tile(position))
        self.played_pieces.append(position)
        return 0

    def mini_max(self, board, depth, is_maximizing_player):
        """Recursive minimax; returns the best heuristic value reachable.

        The maximizing branch plays ``self.opponent_color``, the minimizing
        branch plays ``self.color``.  Side effect: rewrites self.best_moves
        with the positions tied for the best value at the current level.
        """
        # print('~~~~~~~~~~~~~~~~ imaginary board ~~~~~~~~~~~~~~~~ ')
        # board.draw()
        # NOTE(review): identity comparison on ints works only because CPython
        # caches small ints; '==' would be the safe spelling.
        if depth is self.max_depth:
            return self.get_board_heuristic(board)
        if is_maximizing_player:
            best_value = -500000000000000000
            # NOTE(review): loop bounds come from self.board, not the passed-in
            # board — fine only while both always share the same dimensions.
            for number in range(1, len(self.board.tiles) + 1):
                for letter in self.board.letterMap:
                    if self.select_tile_on_imaginary_board(letter + str(number), board).get_color() != TileColor.BLANK \
                            and self.select_tile_on_imaginary_board(letter + str(number), board).get_color() == self.opponent_color:
                        # check the possible position to move the tile_token
                        # NOTE(review): the helper's return value is discarded and
                        # best_value is passed by value, so only the helper's
                        # self.best_moves mutations are visible here.
                        self.check_max_moving_heuristic(letter, number, board, depth, best_value)
                    elif self.select_tile_on_imaginary_board(letter + str(number), board).get_color() == TileColor.BLANK:
                        new_board = self.play_imaginary_turn_placing(board, letter, number, self.opponent_color)
                        current_value = self.mini_max(new_board, depth + 1, False)
                        if current_value > best_value:
                            best_value = current_value
                            self.best_moves.clear()
                            self.best_moves.append(letter + str(number))
                        elif current_value == best_value:
                            self.best_moves.append(letter + str(number))
                    else:
                        continue
            return best_value
        else:
            best_value = 500000000000000000
            for number in range(1, len(self.board.tiles) + 1):
                for letter in self.board.letterMap:
                    if self.select_tile_on_imaginary_board(letter + str(number), board).get_color() != TileColor.BLANK \
                            and self.select_tile_on_imaginary_board(letter + str(number), board).get_color() == self.color:
                        # check the possible position to move the tile_token
                        self.check_min_moving_heuristic(letter, number, board, depth, best_value)
                    elif self.select_tile_on_imaginary_board(letter + str(number), board).get_color() == TileColor.BLANK:
                        new_board = self.play_imaginary_turn_placing(board, letter, number, self.color)
                        current_value = self.mini_max(new_board, depth + 1, True)
                        if current_value < best_value:
                            best_value = current_value
                            self.best_moves.clear()
                            self.best_moves.append(letter + str(number))
                        elif current_value == best_value:
                            self.best_moves.append(letter + str(number))
                    else:
                        continue
            return best_value

    def check_max_moving_heuristic(self, letter, number, board, depth, best_value):
        """Try moving the token at (letter, number) to each of the eight
        neighbouring tiles (when valid and blank), recursing via mini_max
        as the maximizing side (plays self.opponent_color).

        NOTE(review): best_value is a local int copy — updates here never
        reach the caller, which also ignores the return value; only the
        self.best_moves mutations take effect.
        """
        original_tile = letter + str(number)
        # top_left
        if board.is_valid_position((chr(ord(letter) - 1)), number + 1):
            if self.select_tile_on_imaginary_board(chr(ord(letter) - 1) + str(number + 1), board).get_color() == TileColor.BLANK:
                new_board = self.play_imaginary_turn_moving(board, chr(ord(letter) - 1), number + 1, self.opponent_color, original_tile)
                current_value = self.mini_max(new_board, depth + 1, False)
                if current_value > best_value:
                    best_value = current_value
                    self.best_moves.clear()
                    self.best_moves.append(chr(ord(letter) - 1) + str(number + 1))
                elif current_value == best_value:
                    self.best_moves.append(chr(ord(letter) - 1) + str(number + 1))
        # left
        if board.is_valid_position((chr(ord(letter) - 1)), number):
            if self.select_tile_on_imaginary_board(chr(ord(letter) - 1) + str(number), board).get_color() == TileColor.BLANK:
                new_board = self.play_imaginary_turn_moving(board, chr(ord(letter) - 1), number, self.opponent_color, original_tile)
                current_value = self.mini_max(new_board, depth + 1, False)
                if current_value > best_value:
                    best_value = current_value
                    self.best_moves.clear()
                    self.best_moves.append(chr(ord(letter) - 1) + str(number))
                elif current_value == best_value:
                    self.best_moves.append(chr(ord(letter) - 1) + str(number))
        # bottom_left
        if board.is_valid_position((chr(ord(letter) - 1)), number - 1):
            if self.select_tile_on_imaginary_board(chr(ord(letter) - 1) + str(number - 1), board).get_color() == TileColor.BLANK:
                new_board = self.play_imaginary_turn_moving(board, chr(ord(letter) - 1), number - 1, self.opponent_color, original_tile)
                current_value = self.mini_max(new_board, depth + 1, False)
                if current_value > best_value:
                    best_value = current_value
                    self.best_moves.clear()
                    self.best_moves.append(chr(ord(letter) - 1) + str(number - 1))
                elif current_value == best_value:
                    self.best_moves.append(chr(ord(letter) - 1) + str(number - 1))
        # bottom
        if board.is_valid_position(letter, number - 1):
            if self.select_tile_on_imaginary_board(letter + str(number - 1), board).get_color() == TileColor.BLANK:
                new_board = self.play_imaginary_turn_moving(board, letter, number - 1, self.opponent_color, original_tile)
                current_value = self.mini_max(new_board, depth + 1, False)
                if current_value > best_value:
                    best_value = current_value
                    self.best_moves.clear()
                    self.best_moves.append(letter + str(number - 1))
                elif current_value == best_value:
                    self.best_moves.append(letter + str(number - 1))
        # bottom_right
        if board.is_valid_position((chr(ord(letter) + 1)), number - 1):
            if self.select_tile_on_imaginary_board(chr(ord(letter) + 1) + str(number - 1), board).get_color() == TileColor.BLANK:
                new_board = self.play_imaginary_turn_moving(board, chr(ord(letter) + 1), number - 1, self.opponent_color, original_tile)
                current_value = self.mini_max(new_board, depth + 1, False)
                if current_value > best_value:
                    best_value = current_value
                    self.best_moves.clear()
                    self.best_moves.append(chr(ord(letter) + 1) + str(number - 1))
                elif current_value == best_value:
                    self.best_moves.append(chr(ord(letter) + 1) + str(number - 1))
        # right
        if board.is_valid_position((chr(ord(letter) + 1)), number):
            if self.select_tile_on_imaginary_board(chr(ord(letter) + 1) + str(number), board).get_color() == TileColor.BLANK:
                new_board = self.play_imaginary_turn_moving(board, chr(ord(letter) + 1), number, self.opponent_color, original_tile)
                current_value = self.mini_max(new_board, depth + 1, False)
                if current_value > best_value:
                    best_value = current_value
                    self.best_moves.clear()
                    self.best_moves.append(chr(ord(letter) + 1) + str(number))
                elif current_value == best_value:
                    self.best_moves.append(chr(ord(letter) + 1) + str(number))
        # top right
        if board.is_valid_position((chr(ord(letter) + 1)), number + 1):
            if self.select_tile_on_imaginary_board(chr(ord(letter) + 1) + str(number + 1), board).get_color() == TileColor.BLANK:
                new_board = self.play_imaginary_turn_moving(board, chr(ord(letter) + 1), number + 1, self.opponent_color, original_tile)
                current_value = self.mini_max(new_board, depth + 1, False)
                if current_value > best_value:
                    best_value = current_value
                    self.best_moves.clear()
                    self.best_moves.append(chr(ord(letter) + 1) + str(number + 1))
                elif current_value == best_value:
                    self.best_moves.append(chr(ord(letter) + 1) + str(number + 1))
        # top
        if board.is_valid_position(letter, number + 1):
            if self.select_tile_on_imaginary_board(letter + str(number + 1), board).get_color() == TileColor.BLANK:
                new_board = self.play_imaginary_turn_moving(board, letter, number + 1, self.opponent_color, original_tile)
                current_value = self.mini_max(new_board, depth + 1, False)
                if current_value > best_value:
                    best_value = current_value
                    self.best_moves.clear()
                    self.best_moves.append(letter + str(number + 1))
                elif current_value == best_value:
                    self.best_moves.append(letter + str(number + 1))
        # NOTE(review): returns self.best_moves when best_value is truthy, else
        # best_value — callers in mini_max ignore this return value anyway.
        return best_value and self.best_moves

    def check_min_moving_heuristic(self, letter, number, board, depth, best_value):
        """Mirror of check_max_moving_heuristic for the minimizing side
        (plays self.color, takes the smaller current_value).

        NOTE(review): same caveat — best_value updates and the return value
        never reach the caller; only self.best_moves mutations do.
        """
        original_tile = letter + str(number)
        # top_left
        if board.is_valid_position((chr(ord(letter) - 1)), number + 1):
            if self.select_tile_on_imaginary_board(chr(ord(letter) - 1) + str(number + 1), board).get_color() == TileColor.BLANK:
                new_board = self.play_imaginary_turn_moving(board, chr(ord(letter) - 1), number + 1, self.color, original_tile)
                current_value = self.mini_max(new_board, depth + 1, True)
                if current_value < best_value:
                    best_value = current_value
                    self.best_moves.clear()
                    self.best_moves.append(chr(ord(letter) - 1) + str(number + 1))
                elif current_value == best_value:
                    self.best_moves.append(chr(ord(letter) - 1) + str(number + 1))
        # left
        if board.is_valid_position((chr(ord(letter) - 1)), number):
            if self.select_tile_on_imaginary_board(chr(ord(letter) - 1) + str(number), board).get_color() == TileColor.BLANK:
                new_board = self.play_imaginary_turn_moving(board, chr(ord(letter) - 1), number, self.color, original_tile)
                current_value = self.mini_max(new_board, depth + 1, True)
                if current_value < best_value:
                    best_value = current_value
                    self.best_moves.clear()
                    self.best_moves.append(chr(ord(letter) - 1) + str(number))
                elif current_value == best_value:
                    self.best_moves.append(chr(ord(letter) - 1) + str(number))
        # bottom_left
        if board.is_valid_position((chr(ord(letter) - 1)), number - 1):
            if self.select_tile_on_imaginary_board(chr(ord(letter) - 1) + str(number - 1), board).get_color() == TileColor.BLANK:
                new_board = self.play_imaginary_turn_moving(board, chr(ord(letter) - 1), number - 1, self.color, original_tile)
                current_value = self.mini_max(new_board, depth + 1, True)
                if current_value < best_value:
                    best_value = current_value
                    self.best_moves.clear()
                    self.best_moves.append(chr(ord(letter) - 1) + str(number - 1))
                elif current_value == best_value:
                    self.best_moves.append(chr(ord(letter) - 1) + str(number - 1))
        # bottom
        if board.is_valid_position(letter, number - 1):
            if self.select_tile_on_imaginary_board(letter + str(number - 1), board).get_color() == TileColor.BLANK:
                new_board = self.play_imaginary_turn_moving(board, letter, number - 1, self.color, original_tile)
                current_value = self.mini_max(new_board, depth + 1, True)
                if current_value < best_value:
                    best_value = current_value
                    self.best_moves.clear()
                    self.best_moves.append(letter + str(number - 1))
                elif current_value == best_value:
                    self.best_moves.append(letter + str(number - 1))
        # bottom_right
        if board.is_valid_position((chr(ord(letter) + 1)), number - 1):
            if self.select_tile_on_imaginary_board(chr(ord(letter) + 1) + str(number - 1), board).get_color() == TileColor.BLANK:
                new_board = self.play_imaginary_turn_moving(board, chr(ord(letter) + 1), number - 1, self.color, original_tile)
                current_value = self.mini_max(new_board, depth + 1, True)
                if current_value < best_value:
                    best_value = current_value
                    self.best_moves.clear()
                    self.best_moves.append(chr(ord(letter) + 1) + str(number - 1))
                elif current_value == best_value:
                    self.best_moves.append(chr(ord(letter) + 1) + str(number - 1))
        # right
        if board.is_valid_position((chr(ord(letter) + 1)), number):
            if self.select_tile_on_imaginary_board(chr(ord(letter) + 1) + str(number), board).get_color() == TileColor.BLANK:
                new_board = self.play_imaginary_turn_moving(board, chr(ord(letter) + 1), number, self.color, original_tile)
                current_value = self.mini_max(new_board, depth + 1, True)
                if current_value < best_value:
                    best_value = current_value
                    self.best_moves.clear()
                    self.best_moves.append(chr(ord(letter) + 1) + str(number))
                elif current_value == best_value:
                    self.best_moves.append(chr(ord(letter) + 1) + str(number))
        # top right
        if board.is_valid_position((chr(ord(letter) + 1)), number + 1):
            if self.select_tile_on_imaginary_board(chr(ord(letter) + 1) + str(number + 1), board).get_color() == TileColor.BLANK:
                new_board = self.play_imaginary_turn_moving(board, chr(ord(letter) + 1), number + 1, self.color, original_tile)
                current_value = self.mini_max(new_board, depth + 1, True)
                if current_value < best_value:
                    best_value = current_value
                    self.best_moves.clear()
                    self.best_moves.append(chr(ord(letter) + 1) + str(number + 1))
                elif current_value == best_value:
                    self.best_moves.append(chr(ord(letter) + 1) + str(number + 1))
        # top
        if board.is_valid_position(letter, number + 1):
            if self.select_tile_on_imaginary_board(letter + str(number + 1), board).get_color() == TileColor.BLANK:
                new_board = self.play_imaginary_turn_moving(board, letter, number + 1, self.color, original_tile)
                current_value = self.mini_max(new_board, depth + 1, True)
                if current_value < best_value:
                    best_value = current_value
                    self.best_moves.clear()
                    self.best_moves.append(letter + str(number + 1))
                elif current_value == best_value:
                    self.best_moves.append(letter + str(number + 1))
        return best_value and self.best_moves

    def play_imaginary_turn_placing(self, board, letter, number, color):
        """Return a deep copy of *board* with a *color* tile placed at (letter, number)."""
        board_copy = copy.deepcopy(board)
        self.place_tile_on_imaginary_board(self.select_tile_on_imaginary_board(letter + str(number), board_copy), color)
        return board_copy

    def play_imaginary_turn_moving(self, board, letter, number, color, original_tile):
        """Return a deep copy of *board* with the tile at *original_tile* moved to (letter, number)."""
        board_copy = copy.deepcopy(board)
        self.move_tile_on_imaginary_board(self.select_tile_on_imaginary_board(letter + str(number), board_copy), color, self.select_tile_on_imaginary_board(original_tile, board_copy))
        return board_copy

    @staticmethod
    def select_tile_on_imaginary_board(position, board):
        """Look up a tile from a position string, e.g. "B3" -> column 'B', row "3"."""
        return board.get_tile(position[0], position[1:])

    @staticmethod
    def move_tile_on_imaginary_board(tile, color, original_tile):
        """Color the destination tile and blank out the source tile in place."""
        tile.set_color(color)
        original_tile.set_color(TileColor.BLANK)

    @staticmethod
    def place_tile_on_imaginary_board(tile, color):
        """Color a single tile in place."""
        tile.set_color(color)

    @staticmethod
    def get_bottom_left_heuristic(board, letter, number, opponent_color):
        """Count opponent tiles diagonally around the bottom-left neighbour.

        Returns -1 when the bottom-left position itself is off the board.
        NOTE(review): not referenced by any other method in this class.
        """
        heuristic_value = 0
        if board.is_valid_position((chr(ord(letter) - 1)), number - 1):  # try the bottom left
            if board.get_tile((chr(ord(letter) - 1)), number - 1).get_color() == opponent_color:
                heuristic_value += 1
            if board.is_valid_position((chr(ord(letter) - 2)), number - 2):  # try bottom left of bottom left
                if board.get_tile((chr(ord(letter) - 2)), number - 2).get_color() == opponent_color:
                    heuristic_value += 1
            if board.is_valid_position((chr(ord(letter) - 2)), number):  # try top left of the bottom left
                if board.get_tile((chr(ord(letter) - 2)), number).get_color() == opponent_color:
                    heuristic_value += 1
            if board.is_valid_position(letter, number - 2):  # try bottom right of the bottom left
                if board.get_tile(letter, number - 2).get_color() == opponent_color:
                    heuristic_value += 1
            return heuristic_value
        else:
            return -1

    @staticmethod
    def get_top_left_heuristic(board, letter, number, opponent_color):
        """Count opponent tiles diagonally around the top-left neighbour.

        Returns -1 when the top-left position itself is off the board.
        NOTE(review): not referenced by any other method in this class.
        """
        heuristic_value = 0
        if board.is_valid_position((chr(ord(letter) - 1)), number + 1):  # try the top left
            if board.get_tile((chr(ord(letter) - 1)), number + 1).get_color() == opponent_color:
                heuristic_value += 1
            if board.is_valid_position((chr(ord(letter) - 2)), number):  # try bottom left of the top left
                if board.get_tile((chr(ord(letter) - 2)), number).get_color() == opponent_color:
                    heuristic_value += 1
            if board.is_valid_position((chr(ord(letter) - 2)), number + 2):  # try top left of the top left
                if board.get_tile((chr(ord(letter) - 2)), number + 2).get_color() == opponent_color:
                    heuristic_value += 1
            if board.is_valid_position(letter, number + 2):  # try top right of the bottom left
                if board.get_tile(letter, number + 2).get_color() == opponent_color:
                    heuristic_value += 1
            return heuristic_value
        else:
            return -1

    @staticmethod
    def get_bottom_right_heuristic(board, letter, number, opponent_color):
        """Count opponent tiles diagonally around the bottom-right neighbour.

        Returns -1 when the bottom-right position itself is off the board.
        NOTE(review): not referenced by any other method in this class.
        """
        heuristic_value = 0
        if board.is_valid_position((chr(ord(letter) + 1)), number - 1):  # try the bottom right
            if board.get_tile((chr(ord(letter) + 1)), number - 1).get_color() == opponent_color:
                heuristic_value += 1
            if board.is_valid_position(letter, number - 2):  # try bottom left of the bottom right
                if board.get_tile(letter, number - 2).get_color() == opponent_color:
                    heuristic_value += 1
            if board.is_valid_position((chr(ord(letter) + 2)), number):  # try top right of the bottom right
                if board.get_tile((chr(ord(letter) + 2)), number).get_color() == opponent_color:
                    heuristic_value += 1
            if board.is_valid_position((chr(ord(letter) + 2)), number - 2):  # try bottom right of bottom right
                if board.get_tile((chr(ord(letter) + 2)), number - 2).get_color() == opponent_color:
                    heuristic_value += 1
            return heuristic_value
        else:
            return -1

    @staticmethod
    def get_top_right_heuristic(board, letter, number, opponent_color):
        """Count opponent tiles diagonally around the top-right neighbour.

        Returns -1 when the top-right position itself is off the board.
        NOTE(review): not referenced by any other method in this class.
        """
        heuristic_value = 0
        if board.is_valid_position((chr(ord(letter) + 1)), number + 1):  # try the top right
            if board.get_tile((chr(ord(letter) + 1)), number + 1).get_color() == opponent_color:
                heuristic_value += 1
            if board.is_valid_position(letter, number + 2):  # try top left of the top right
                if board.get_tile(letter, number + 2).get_color() == opponent_color:
                    heuristic_value += 1
            if board.is_valid_position((chr(ord(letter) + 2)), number + 2):  # try top right of the top right
                if board.get_tile((chr(ord(letter) + 2)), number + 2).get_color() == opponent_color:
                    heuristic_value += 1
            if board.is_valid_position((chr(ord(letter) + 2)), number):  # try bottom right of the top right
                if board.get_tile((chr(ord(letter) + 2)), number).get_color() == opponent_color:
                    heuristic_value += 1
            return heuristic_value
        else:
            return -1

    def get_board_heuristic(self, board):
        """Score *board*: positive for opponent-color structures, negative for ours.

        Each non-blank tile earns one point per same-colored diagonal
        neighbour; a tile with all four diagonals matched and not blocked on
        both horizontal sides earns a 1000-point bonus (presumably a
        completed "X" shape — confirm against the game rules).
        """
        total_heuristic = 0
        for number in range(1, len(board.tiles)+1):
            for letter in board.letterMap:
                if board.get_tile(letter, number).get_color() is not TileColor.BLANK:
                    current_color = board.get_tile(letter, number).get_color()
                    current_heuristic = 0
                    blocked = False
                    if board.is_valid_position((chr(ord(letter) - 1)), number - 1):  # try the bottom left
                        if board.get_tile((chr(ord(letter) - 1)), number - 1).get_color() == current_color:
                            current_heuristic += 1
                    if board.is_valid_position((chr(ord(letter) - 1)), number + 1):  # try top left
                        if board.get_tile((chr(ord(letter) - 1)), number + 1).get_color() == current_color:
                            current_heuristic += 1
                    if board.is_valid_position((chr(ord(letter) + 1)), number - 1):  # try bottom right
                        if board.get_tile((chr(ord(letter) + 1)), number - 1).get_color() == current_color:
                            current_heuristic += 1
                    if board.is_valid_position((chr(ord(letter) + 1)), number + 1):  # try top right
                        if board.get_tile((chr(ord(letter) + 1)), number + 1).get_color() == current_color:
                            current_heuristic += 1
                    # check block condition
                    if board.is_valid_position((chr(ord(letter) + 1)), number):  # try right
                        if board.get_tile((chr(ord(letter) + 1)), number).get_color() != current_color:
                            if board.is_valid_position((chr(ord(letter) - 1)), number):  # try left
                                if board.get_tile((chr(ord(letter) - 1)), number).get_color() != current_color:
                                    blocked = True
                    if current_heuristic == 4 and not blocked:
                        current_heuristic += 1000
                    # NOTE(review): 'is' identity compare assumes color values are
                    # singletons (e.g. enum members) — '==' would be safer.
                    if current_color is self.opponent_color:
                        total_heuristic += current_heuristic
                    else:
                        total_heuristic -= current_heuristic
        # print('heuristic value = ' + str(total_heuristic))
        return total_heuristic
| 60.572115
| 183
| 0.58338
| 3,109
| 25,198
| 4.485687
| 0.04889
| 0.041302
| 0.082604
| 0.074573
| 0.865338
| 0.858167
| 0.840241
| 0.835652
| 0.833716
| 0.833716
| 0
| 0.016576
| 0.310461
| 25,198
| 415
| 184
| 60.718072
| 0.786072
| 0.072307
| 0
| 0.732759
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043103
| false
| 0
| 0.014368
| 0.002874
| 0.112069
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
071257d24ca96fffac6eb56f85b04c15790e4155
| 25,290
|
py
|
Python
|
dbms/tests/integration/test_ttl_move/test.py
|
alex-krash/ClickHouse
|
3a8dbf434119df48e52f7f803a7f76b1eb4e84f5
|
[
"Apache-2.0"
] | null | null | null |
dbms/tests/integration/test_ttl_move/test.py
|
alex-krash/ClickHouse
|
3a8dbf434119df48e52f7f803a7f76b1eb4e84f5
|
[
"Apache-2.0"
] | null | null | null |
dbms/tests/integration/test_ttl_move/test.py
|
alex-krash/ClickHouse
|
3a8dbf434119df48e52f7f803a7f76b1eb4e84f5
|
[
"Apache-2.0"
] | null | null | null |
import json
import pytest
import random
import re
import string
import threading
import time
from multiprocessing.dummy import Pool
from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV
cluster = ClickHouseCluster(__file__)

# Two identical nodes, each with two 40MB JBOD tmpfs disks and a 200MB
# 'external' tmpfs disk. The storage policies referenced by the tests
# (small_jbod_with_external, jbods_with_external, only_jbod2, ...) are
# presumably defined under configs/ — verify against logs_config.xml.
node1 = cluster.add_instance('node1',
            config_dir='configs',
            main_configs=['configs/logs_config.xml'],
            with_zookeeper=True,
            tmpfs=['/jbod1:size=40M', '/jbod2:size=40M', '/external:size=200M'],
            macros={"shard": 0, "replica": 1} )

node2 = cluster.add_instance('node2',
            config_dir='configs',
            main_configs=['configs/logs_config.xml'],
            with_zookeeper=True,
            tmpfs=['/jbod1:size=40M', '/jbod2:size=40M', '/external:size=200M'],
            macros={"shard": 0, "replica": 2} )
@pytest.fixture(scope="module")
def started_cluster():
    """Module-scoped fixture: start the cluster once for all tests in this
    module and shut it down afterwards, even if startup itself fails."""
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()
def get_random_string(length):
    """Return a random string of `length` uppercase ASCII letters and digits.

    Used by the tests below to generate ~1MB filler rows.
    """
    # BUGFIX(review): the previous implementation called bytes(str) and
    # str(bytearray), which only behaves as intended on Python 2 — on
    # Python 3, bytes(str) raises TypeError and str(bytearray) yields the
    # "bytearray(b'...')" repr. Build the string portably instead.
    symbols = string.ascii_uppercase + string.digits
    return ''.join(random.choice(symbols) for _ in range(length))
def get_used_disks_for_table(node, table_name):
    """Return the disk names holding active parts of `table_name`,
    ordered by part modification time."""
    sql = ("select disk_name from system.parts "
           "where table == '{}' and active=1 order by modification_time").format(table_name)
    raw = node.query(sql)
    return raw.strip().split('\n')
@pytest.mark.parametrize("name,engine,alter", [
    ("mt_test_rule_with_invalid_destination","MergeTree()",0),
    ("replicated_mt_test_rule_with_invalid_destination","ReplicatedMergeTree('/clickhouse/replicated_test_rule_with_invalid_destination', '1')",0),
    ("mt_test_rule_with_invalid_destination","MergeTree()",1),
    ("replicated_mt_test_rule_with_invalid_destination","ReplicatedMergeTree('/clickhouse/replicated_test_rule_with_invalid_destination', '1')",1),
])
def test_rule_with_invalid_destination(started_cluster, name, engine, alter):
    """A TTL move rule whose destination disk/volume is unknown, or exists
    but is not part of the table's storage policy, must be rejected — both
    at CREATE TABLE time (alter=0) and via ALTER ... MODIFY TTL (alter=1)."""
    try:
        def get_command(x, policy):
            # Returns an ALTER statement when altering an existing table
            # with a TTL expression, otherwise a CREATE TABLE with the
            # (possibly empty) TTL expression inlined.
            x = x or ""
            if alter and x:
                return """
                    ALTER TABLE {name} MODIFY TTL {expression}
                """.format(expression=x, name=name)
            else:
                return """
                    CREATE TABLE {name} (
                        s1 String,
                        d1 DateTime
                    ) ENGINE = {engine}
                    ORDER BY tuple()
                    {expression}
                    SETTINGS storage_policy='{policy}'
                """.format(expression=x, name=name, engine=engine, policy=policy)

        if alter:
            # Create a valid table first; the invalid TTL then comes via ALTER.
            node1.query(get_command(None, "small_jbod_with_external"))
        with pytest.raises(QueryRuntimeException):
            # Disk named 'unknown' does not exist in the configuration.
            node1.query(get_command("TTL d1 TO DISK 'unknown'", "small_jbod_with_external"))

        node1.query("DROP TABLE IF EXISTS {}".format(name))

        if alter:
            node1.query(get_command(None, "small_jbod_with_external"))
        with pytest.raises(QueryRuntimeException):
            # Volume named 'unknown' does not exist either.
            node1.query(get_command("TTL d1 TO VOLUME 'unknown'", "small_jbod_with_external"))

        node1.query("DROP TABLE IF EXISTS {}".format(name))

        if alter:
            node1.query(get_command(None, "only_jbod2"))
        with pytest.raises(QueryRuntimeException):
            # Disk 'jbod1' exists but is not part of policy 'only_jbod2'.
            node1.query(get_command("TTL d1 TO DISK 'jbod1'", "only_jbod2"))

        node1.query("DROP TABLE IF EXISTS {}".format(name))

        if alter:
            node1.query(get_command(None, "only_jbod2"))
        with pytest.raises(QueryRuntimeException):
            # Volume 'external' exists but is not part of policy 'only_jbod2'.
            node1.query(get_command("TTL d1 TO VOLUME 'external'", "only_jbod2"))
    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.parametrize("name,engine,positive", [
    ("mt_test_inserts_to_disk_do_not_work","MergeTree()",0),
    ("replicated_mt_test_inserts_to_disk_do_not_work","ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_disk_do_not_work', '1')",0),
    ("mt_test_inserts_to_disk_work","MergeTree()",1),
    ("replicated_mt_test_inserts_to_disk_work","ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_disk_work', '1')",1),
])
def test_inserts_to_disk_work(started_cluster, name, engine, positive):
    """Rows whose TTL is already expired at INSERT time must be written
    directly to the TTL destination disk ('external'); with a not-yet
    expired row in the part, the part stays on 'jbod1'."""
    try:
        node1.query("""
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 TO DISK 'external'
            SETTINGS storage_policy='small_jbod_with_external'
        """.format(name=name, engine=engine))

        data = []  # 10MB in total
        for i in range(10):
            # positive: every row already expired; negative: row 0 expires
            # 300s in the future, keeping the whole part on jbod1.
            data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time.time()-1 if i > 0 or positive else time.time()+300)))  # 1MB row

        node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"external" if positive else "jbod1"}

        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"
    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.parametrize("name,engine,positive", [
    ("mt_test_moves_to_disk_do_not_work","MergeTree()",0),
    ("replicated_mt_test_moves_to_disk_do_not_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_disk_do_not_work', '1')",0),
    ("mt_test_moves_to_disk_work","MergeTree()",1),
    ("replicated_mt_test_moves_to_disk_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_disk_work', '1')",1),
])
def test_moves_to_disk_work(started_cluster, name, engine, positive):
    """Parts inserted with a not-yet-expired TTL initially land on 'jbod1'
    and must be moved in the background to 'external' once the TTL expires
    (positive); a part containing a row expiring later stays on 'jbod1'."""
    try:
        node1.query("""
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 TO DISK 'external'
            SETTINGS storage_policy='small_jbod_with_external'
        """.format(name=name, engine=engine))

        wait_expire_1 = 6
        wait_expire_2 = 4
        time_1 = time.time() + wait_expire_1
        time_2 = time.time() + wait_expire_1 + wait_expire_2
        # Timer thread: join() below blocks until wait_expire_1 seconds
        # have elapsed from this point.
        wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,))
        wait_expire_1_thread.start()

        data = []  # 10MB in total
        for i in range(10):
            # positive: all rows expire at time_1; negative: row 0 expires
            # later (time_2), so the part cannot move at the second check.
            data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time_1 if i > 0 or positive else time_2)))  # 1MB row

        node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
        used_disks = get_used_disks_for_table(node1, name)
        # Nothing has expired yet: everything is still on the JBOD disk.
        assert set(used_disks) == {"jbod1"}

        wait_expire_1_thread.join()
        time.sleep(wait_expire_2/2)  # give the background move time to run

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"external" if positive else "jbod1"}

        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"
    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.parametrize("name,engine", [
    ("mt_test_moves_to_volume_work","MergeTree()"),
    ("replicated_mt_test_moves_to_volume_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_volume_work', '1')"),
])
def test_moves_to_volume_work(started_cluster, name, engine):
    """Parts spread over both JBOD disks must all be moved to the
    'external' volume once their TTL expires."""
    try:
        node1.query("""
            CREATE TABLE {name} (
                p1 Int64,
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            PARTITION BY p1
            TTL d1 TO VOLUME 'external'
            SETTINGS storage_policy='jbods_with_external'
        """.format(name=name, engine=engine))

        wait_expire_1 = 10
        time_1 = time.time() + wait_expire_1
        # Timer thread: join() below waits until the TTL moment has passed.
        wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,))
        wait_expire_1_thread.start()

        # Two partitions, all rows expiring at time_1.
        for p in range(2):
            data = []  # 10MB in total
            for i in range(5):
                data.append((str(p), "'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time_1)))  # 1MB row
            node1.query("INSERT INTO {} (p1, s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))

        used_disks = get_used_disks_for_table(node1, name)
        # The policy placed parts across both JBOD disks (checked here).
        assert set(used_disks) == {'jbod1', 'jbod2'}

        wait_expire_1_thread.join()
        time.sleep(1)  # give the background move time to run

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"external"}

        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"
    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.parametrize("name,engine,positive", [
    ("mt_test_inserts_to_volume_do_not_work","MergeTree()",0),
    ("replicated_mt_test_inserts_to_volume_do_not_work","ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_volume_do_not_work', '1')",0),
    ("mt_test_inserts_to_volume_work","MergeTree()",1),
    ("replicated_mt_test_inserts_to_volume_work","ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_volume_work', '1')",1),
])
def test_inserts_to_volume_work(started_cluster, name, engine, positive):
    """With background moves stopped, rows already expired at INSERT time
    must be written directly to the 'external' volume; parts holding a row
    with a future TTL go to 'jbod1' and stay there."""
    try:
        node1.query("""
            CREATE TABLE {name} (
                p1 Int64,
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            PARTITION BY p1
            TTL d1 TO VOLUME 'external'
            SETTINGS storage_policy='small_jbod_with_external'
        """.format(name=name, engine=engine))

        # Disable background moves so the observed disk is decided purely
        # by the INSERT itself.
        node1.query("SYSTEM STOP MOVES {name}".format(name=name))

        for p in range(2):
            data = []  # 20MB in total
            for i in range(10):
                # positive: all rows expired; negative: row 0 expires in 300s.
                data.append((str(p), "'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time.time()-1 if i > 0 or positive else time.time()+300)))  # 1MB row
            node1.query("INSERT INTO {} (p1, s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"external" if positive else "jbod1"}

        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "20"
    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.parametrize("name,engine", [
    ("mt_test_moves_to_disk_eventually_work","MergeTree()"),
    ("replicated_mt_test_moves_to_disk_eventually_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_disk_eventually_work', '1')"),
])
def test_moves_to_disk_eventually_work(started_cluster, name, engine):
    """A TTL move blocked because the destination disk is full must happen
    eventually, once space on the destination is freed."""
    try:
        name_temp = name + "_temp"

        # Filler table occupying almost all of jbod2 (35MB of the 40M tmpfs).
        node1.query("""
            CREATE TABLE {name} (
                s1 String
            ) ENGINE = MergeTree()
            ORDER BY tuple()
            SETTINGS storage_policy='only_jbod2'
        """.format(name=name_temp))

        data = []  # 35MB in total
        for i in range(35):
            data.append(get_random_string(1024 * 1024))  # 1MB row

        node1.query("INSERT INTO {} VALUES {}".format(name_temp, ",".join(["('" + x + "')" for x in data])))
        used_disks = get_used_disks_for_table(node1, name_temp)
        assert set(used_disks) == {"jbod2"}

        # Table under test: expired rows should move jbod1 -> jbod2, but
        # jbod2 currently has no room.
        node1.query("""
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 TO DISK 'jbod2'
            SETTINGS storage_policy='jbod1_with_jbod2'
        """.format(name=name, engine=engine))

        data = []  # 10MB in total
        for i in range(10):
            data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time.time()-1)))  # 1MB row

        node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))
        used_disks = get_used_disks_for_table(node1, name)
        # TTL is already expired, but the move is blocked by the full disk.
        assert set(used_disks) == {"jbod1"}

        # Free the space; the pending move should now go through.
        node1.query("DROP TABLE {}".format(name_temp))
        time.sleep(2)

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod2"}

        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"
    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name_temp))
        node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.parametrize("name,engine,positive", [
    ("mt_test_merges_to_disk_do_not_work","MergeTree()",0),
    ("replicated_mt_test_merges_to_disk_do_not_work","ReplicatedMergeTree('/clickhouse/replicated_test_merges_to_disk_do_not_work', '1')",0),
    ("mt_test_merges_to_disk_work","MergeTree()",1),
    ("replicated_mt_test_merges_to_disk_work","ReplicatedMergeTree('/clickhouse/replicated_test_merges_to_disk_work', '1')",1),
])
def test_merges_to_disk_work(started_cluster, name, engine, positive):
    """Merging two parts whose TTL has expired must put the merged part on
    the TTL destination ('external'); with a not-yet-expired row present,
    the merged part stays on 'jbod1'."""
    try:
        node1.query("""
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 TO DISK 'external'
            SETTINGS storage_policy='small_jbod_with_external'
        """.format(name=name, engine=engine))

        # Keep the two inserted parts intact until we explicitly merge them.
        node1.query("SYSTEM STOP MERGES {}".format(name))
        node1.query("SYSTEM STOP MOVES {}".format(name))

        wait_expire_1 = 10
        wait_expire_2 = 4
        time_1 = time.time() + wait_expire_1
        time_2 = time.time() + wait_expire_1 + wait_expire_2
        # Timer thread: join() below waits until time_1 has passed.
        wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,))
        wait_expire_1_thread.start()

        for _ in range(2):
            data = []  # 16MB in total
            for i in range(8):
                # positive: all rows expire at time_1; negative: one row per
                # part expires later (time_2).
                data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time_1 if i > 0 or positive else time_2)))  # 1MB row
            node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1"}
        # Two separate active parts, one per INSERT.
        assert "2" == node1.query("SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip()

        wait_expire_1_thread.join()
        time.sleep(wait_expire_2/2)

        node1.query("SYSTEM START MERGES {}".format(name))
        node1.query("OPTIMIZE TABLE {}".format(name))
        time.sleep(1)

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"external" if positive else "jbod1"}
        # OPTIMIZE merged everything into a single active part.
        assert "1" == node1.query("SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip()

        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "16"
    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.parametrize("name,engine", [
    ("mt_test_merges_with_full_disk_work","MergeTree()"),
    ("replicated_mt_test_merges_with_full_disk_work","ReplicatedMergeTree('/clickhouse/replicated_test_merges_with_full_disk_work', '1')"),
])
def test_merges_with_full_disk_work(started_cluster, name, engine):
    """When the TTL destination disk is full, a merge of expired parts must
    still succeed — the merged part is written to the source disk, against
    the move rule."""
    try:
        name_temp = name + "_temp"

        # Filler table occupying almost all of jbod2 (35MB of the 40M tmpfs).
        node1.query("""
            CREATE TABLE {name} (
                s1 String
            ) ENGINE = MergeTree()
            ORDER BY tuple()
            SETTINGS storage_policy='only_jbod2'
        """.format(name=name_temp))

        data = []  # 35MB in total
        for i in range(35):
            data.append(get_random_string(1024 * 1024))  # 1MB row

        node1.query("INSERT INTO {} VALUES {}".format(name_temp, ",".join(["('" + x + "')" for x in data])))
        used_disks = get_used_disks_for_table(node1, name_temp)
        assert set(used_disks) == {"jbod2"}

        node1.query("""
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 TO DISK 'jbod2'
            SETTINGS storage_policy='jbod1_with_jbod2'
        """.format(name=name, engine=engine))

        wait_expire_1 = 10
        time_1 = time.time() + wait_expire_1
        # Timer thread: join() below waits until the TTL moment has passed.
        wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,))
        wait_expire_1_thread.start()

        for _ in range(2):
            data = []  # 12MB in total
            for i in range(6):
                data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time_1)))  # 1MB row
            node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1"}
        # Two separate active parts, one per INSERT.
        assert "2" == node1.query("SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip()

        wait_expire_1_thread.join()

        node1.query("OPTIMIZE TABLE {}".format(name))
        time.sleep(1)

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1"}  # Merged to the same disk against the rule.
        assert "1" == node1.query("SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip()

        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "12"
    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name_temp))
        node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.parametrize("name,engine,positive", [
    ("mt_test_moves_after_merges_do_not_work","MergeTree()",0),
    ("replicated_mt_test_moves_after_merges_do_not_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_merges_do_not_work', '1')",0),
    ("mt_test_moves_after_merges_work","MergeTree()",1),
    ("replicated_mt_test_moves_after_merges_work","ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_merges_work', '1')",1),
])
def test_moves_after_merges_work(started_cluster, name, engine, positive):
    """A part produced by merging (before the TTL expires) must itself be
    moved to 'external' once the TTL expires (positive); with a row
    expiring later, the merged part stays on 'jbod1'."""
    try:
        node1.query("""
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 TO DISK 'external'
            SETTINGS storage_policy='small_jbod_with_external'
        """.format(name=name, engine=engine))

        wait_expire_1 = 10
        wait_expire_2 = 4
        time_1 = time.time() + wait_expire_1
        time_2 = time.time() + wait_expire_1 + wait_expire_2
        # Timer thread: join() below waits until time_1 has passed.
        wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,))
        wait_expire_1_thread.start()

        for _ in range(2):
            data = []  # 14MB in total
            for i in range(7):
                # positive: all rows expire at time_1; negative: one row per
                # part expires later (time_2).
                data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time_1 if i > 0 or positive else time_2)))  # 1MB row
            node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))

        # Merge the two parts while the TTL has not expired yet.
        node1.query("OPTIMIZE TABLE {}".format(name))
        time.sleep(1)

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1"}
        assert "1" == node1.query("SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip()

        wait_expire_1_thread.join()
        time.sleep(wait_expire_2/2)  # give the background move time to run

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"external" if positive else "jbod1"}

        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "14"
    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.parametrize("name,engine,positive", [
    ("mt_test_ttls_do_not_work_after_alter","MergeTree()",0),
    ("replicated_mt_test_ttls_do_not_work_after_alter","ReplicatedMergeTree('/clickhouse/replicated_test_ttls_do_not_work_after_alter', '1')",0),
    ("mt_test_ttls_do_not_work_after_alter","MergeTree()",1),
    ("replicated_mt_test_ttls_do_not_work_after_alter","ReplicatedMergeTree('/clickhouse/replicated_test_ttls_do_not_work_after_alter', '1')",1),
])
def test_ttls_do_not_work_after_alter(started_cluster, name, engine, positive):
    """After ALTER ... MODIFY TTL replaces the move-to-disk rule with a
    plain delete TTL, expired rows must no longer be moved to 'external'.

    BUGFIX(review): the parametrize values previously reused the table and
    ZooKeeper path names of test_moves_after_merges_work (a copy-paste of
    that test's parameter list), which can collide with it; they now match
    this test's own name.
    """
    try:
        node1.query("""
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 TO DISK 'external'
            SETTINGS storage_policy='small_jbod_with_external'
        """.format(name=name, engine=engine))

        if positive:
            # Replacing the TTL expression drops the TO DISK clause,
            # which disables moving.
            node1.query("""
                ALTER TABLE {name}
                    MODIFY TTL
                    d1 + INTERVAL 15 MINUTE
            """.format(name=name))  # That shall disable TTL.

        data = []  # 10MB in total
        for i in range(10):
            data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time.time()-1)))  # 1MB row

        node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))

        used_disks = get_used_disks_for_table(node1, name)
        # With the move rule removed, the expired rows stay on jbod1.
        assert set(used_disks) == {"jbod1" if positive else "external"}

        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"
    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.parametrize("name,engine,positive", [
    ("mt_test_alter_multiple_ttls_positive", "MergeTree()", True),
    ("mt_replicated_test_alter_multiple_ttls_positive", "ReplicatedMergeTree('/clickhouse/replicated_test_alter_multiple_ttls_positive', '1')", True),
    ("mt_test_alter_multiple_ttls_negative", "MergeTree()", False),
    ("mt_replicated_test_alter_multiple_ttls_negative", "ReplicatedMergeTree('/clickhouse/replicated_test_alter_multiple_ttls_negative', '1')", False),
])
# Copyright 2019, Altinity LTD
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def test_alter_multiple_ttls(started_cluster, name, engine, positive):
    """Check that when multiple TTL expressions are set
    and before any parts are inserted the TTL expressions
    are changed with ALTER command then all old
    TTL expressions are removed and the
    the parts are moved to the specified disk or volume or
    deleted if the new TTL expression is triggered
    and are not moved or deleted when it is not.

    BUGFIX(review): the license text used to sit in the docstring slot,
    leaving this description as a dead bare-string statement; it is now a
    comment block above. The three assertions below also had an operator
    precedence bug (`assert x == A if positive else B`), which made the
    negative branch assert the truthiness of a non-empty literal — i.e.
    always pass; parentheses now force the intended comparison.
    """
    now = time.time()
    try:
        node1.query("""
            CREATE TABLE {name} (
                p1 Int64,
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            PARTITION BY p1
            TTL d1 + INTERVAL 30 SECOND TO DISK 'jbod2',
                d1 + INTERVAL 60 SECOND TO VOLUME 'external'
            SETTINGS storage_policy='jbods_with_external', merge_with_ttl_timeout=0
        """.format(name=name, engine=engine))

        # Replace both original rules with a faster move/move/delete chain.
        node1.query("""
            ALTER TABLE {name} MODIFY
            TTL d1 + INTERVAL 0 SECOND TO DISK 'jbod2',
                d1 + INTERVAL 5 SECOND TO VOLUME 'external',
                d1 + INTERVAL 10 SECOND DELETE
        """.format(name=name))

        for p in range(3):
            data = []  # 6MB in total
            now = time.time()
            for i in range(2):
                p1 = p
                s1 = get_random_string(1024 * 1024)  # 1MB
                # positive: both rows expired; negative: one row per
                # partition expires 300s in the future.
                d1 = now - 1 if i > 0 or positive else now + 300
                data.append("({}, '{}', toDateTime({}))".format(p1, s1, d1))
            node1.query("INSERT INTO {name} (p1, s1, d1) VALUES {values}".format(name=name, values=",".join(data)))

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == ({"jbod2"} if positive else {"jbod1", "jbod2"})

        assert node1.query("SELECT count() FROM {name}".format(name=name)).splitlines() == ["6"]

        time.sleep(5)  # after +5s the second rule moves parts to 'external'

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == ({"external"} if positive else {"jbod1", "jbod2"})

        assert node1.query("SELECT count() FROM {name}".format(name=name)).splitlines() == ["6"]

        time.sleep(5)  # after +10s the DELETE rule removes the expired rows
        node1.query("OPTIMIZE TABLE {name} FINAL".format(name=name))

        assert node1.query("SELECT count() FROM {name}".format(name=name)).splitlines() == (["0"] if positive else ["3"])
    finally:
        node1.query("DROP TABLE IF EXISTS {name}".format(name=name))
| 40.5939
| 176
| 0.618782
| 3,183
| 25,290
| 4.673893
| 0.088282
| 0.051086
| 0.0244
| 0.020165
| 0.853465
| 0.829536
| 0.804127
| 0.753848
| 0.716341
| 0.681791
| 0
| 0.031266
| 0.243733
| 25,290
| 622
| 177
| 40.659164
| 0.746575
| 0.034757
| 0
| 0.722815
| 0
| 0
| 0.412267
| 0.148026
| 0
| 0
| 0
| 0
| 0.076759
| 1
| 0.031983
| false
| 0
| 0.023454
| 0.002132
| 0.063966
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
075a3a618b71c80864a14c443654ea33c9ab4883
| 1,383
|
py
|
Python
|
001-010/008.py
|
KKishikawa/project-euler-for-study-code
|
fd46f1ee3211b5daa6d042cc993e55f631c9caac
|
[
"MIT"
] | null | null | null |
001-010/008.py
|
KKishikawa/project-euler-for-study-code
|
fd46f1ee3211b5daa6d042cc993e55f631c9caac
|
[
"MIT"
] | null | null | null |
001-010/008.py
|
KKishikawa/project-euler-for-study-code
|
fd46f1ee3211b5daa6d042cc993e55f631c9caac
|
[
"MIT"
] | null | null | null |
# Project Euler problem 8: find the largest product of `digits` adjacent
# digits in the 1000-digit number below.
problem_num = '7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450'

# Window width: number of adjacent digits to multiply.
digits = 13

# Best product found so far.
larger_num = 0

# Scan the number as a string and slide a window over every start index
# (windows near the end are shorter than `digits`, matching a plain range
# over all positions), multiplying the digits one by one.
for start in range(len(problem_num)):
    product = 1
    for digit_char in problem_num[start:start + digits]:
        product *= int(digit_char)
    larger_num = max(larger_num, product)

print(larger_num)
| 72.789474
| 1,016
| 0.921909
| 50
| 1,383
| 25.24
| 0.5
| 0.028526
| 0.020602
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.767584
| 0.05423
| 1,383
| 18
| 1,017
| 76.833333
| 0.197248
| 0.078091
| 0
| 0
| 0
| 0
| 0.788022
| 0.788022
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.1
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0769f32bba0afaef6a6e3f01b3b10b1bbe729846
| 325,368
|
py
|
Python
|
test/ITU_validation_test.py
|
iportillo/ITU-Rpy
|
35123f8894137f9bcc375b4513367d068cc33ca1
|
[
"MIT"
] | 34
|
2018-06-01T17:12:37.000Z
|
2021-04-03T11:33:00.000Z
|
test/ITU_validation_test.py
|
hygson/ITU-Rpy
|
35123f8894137f9bcc375b4513367d068cc33ca1
|
[
"MIT"
] | 25
|
2021-04-08T17:25:57.000Z
|
2022-01-25T00:58:30.000Z
|
test/ITU_validation_test.py
|
hygson/ITU-Rpy
|
35123f8894137f9bcc375b4513367d068cc33ca1
|
[
"MIT"
] | 253
|
2021-04-25T23:21:23.000Z
|
2022-03-14T11:09:01.000Z
|
# -*- coding: utf-8 -*-
import unittest as test
import itur
import itur.models as models
import sys
from astropy import units as u
def suite():
    """ A test suite for the ITU-P Recommendations. Recommendations tested:
          * ITU-P R-676-9
          * ITU-P R-676-11
          * ITU-P R-618-12
          * ITU-P R-618-13
          * ITU-P R-453-12
          * ITU-P R-837-6
          * ITU-P R-837-7
          * ITU-P R-838-3
          * ITU-P R-839-4
          * ITU-P R-840-4
          * ITU-P R-840-7
          * ITU-P R-1511-1
    """
    test_suite = test.TestSuite()

    # Pin every model to the recommendation version under test.
    model_versions = (
        (models.itu453, 13),
        (models.itu618, 13),
        (models.itu676, 11),
        (models.itu836, 6),
        (models.itu837, 7),
        (models.itu838, 3),
        (models.itu839, 4),
        (models.itu840, 7),
        (models.itu1510, 1),
        (models.itu1511, 1),
    )
    for model, version in model_versions:
        model.change_version(version)

    # (test-case class, method name) pairs, registered in this exact order.
    registrations = [
        # ITU-R P.676 (Gaseous attenuation)
        (ITUR676_9TestCase, 'test_gammaw'),
        (ITUR676_9TestCase, 'test_gamma0'),
        # (ITUR676_9TestCase, 'test_zenit_water_vapour_attenuation'),
        (ITUR676_11TestCase, 'test_gammaw_exact'),
        (ITUR676_11TestCase, 'test_gamma0_exact'),
        (ITUR676_11TestCase, 'test_gammaw_approx'),
        (ITUR676_11TestCase, 'test_gamma0_approx'),
        (ITUR676_11TestCase, 'test_zenit_water_vapour_attenuation'),
        # ITU-R P.618 (Rain attenuation)
        (ITUR618_12TestCase, 'test_rain_cross_polarization_discrimination'),
        (ITUR618_12TestCase, 'test_rain_attenuation'),
        (ITUR618_12TestCase, 'test_scintillation_attenuation'),
        (ITUR618_13TestCase, 'test_rain_attenuation'),
        (ITUR618_13TestCase, 'test_probability_of_rain_attenuation'),
        # (ITUR618_13TestCase, 'test_site_diversity'),
        (ITUR618_13TestCase, 'test_scintillation_attenuation'),
        (ITUR618_13TestCase, 'test_rain_cross_polarization_discrimination'),
        (ITUR618_13TestCase, 'test_total_attenuation'),
        # ITU-R P.453 (Wet term radio refractivity)
        (ITUR453_12TestCase, 'test_wet_term_radio_refractivity'),
        (ITUR453_13TestCase, 'test_wet_term_radio_refractivity'),
        # ITU-R P.836 (Water vapour density)
        (ITUR836_6TestCase, 'test_surface_water_vapour_density'),
        (ITUR836_6TestCase, 'test_total_water_vapour_content'),
        # ITU-R P.837 (Rainfall rate)
        (ITUR837_6TestCase, 'test_rainfall_rate'),
        (ITUR837_7TestCase, 'test_rainfall_rate'),
        (ITUR837_7TestCase, 'test_rainfall_probability'),
        (ITUR837_7TestCase, 'test_rainfall_rate_R001'),
        # ITU-R P.838 (Rainfall specific attenuation)
        (ITUR838_3TestCase, 'test_rain_specific_attenuation'),
        # ITU-R P.839 (Rain height)
        (ITUR839_4TestCase, 'test_isoterm_0_deg'),
        (ITUR839_4TestCase, 'test_rain_height'),
        # ITU-R P.840 (Clouds attenuation)
        # (ITUR840_4TestCase, 'test_columnar_content_reduced_liquid'),
        # (ITUR840_4TestCase, 'test_cloud_attenuation'),
        (ITUR840_7TestCase, 'test_columnar_content_reduced_liquid'),
        (ITUR840_7TestCase, 'test_cloud_attenuation'),
        # ITU-R P.1511 (Topographic altitude)
        (ITUR1511_1TestCase, 'test_topographic_altitude'),
        (ITUR1511_2TestCase, 'test_topographic_altitude'),
    ]
    for case_cls, method_name in registrations:
        test_suite.addTest(case_cls(method_name))

    return test_suite
class ITUR453_12TestCase(test.TestCase):
    """Validation values for ITU-R P.453-12 (wet term radio refractivity)."""

    def setUp(self):
        # Pin the model to version 12 of the recommendation.
        models.itu453.change_version(12)

    def test_wet_term_radio_refractivity(self):
        # (latitude, longitude, expected N_wet). Several sites are checked
        # twice, mirroring the original validation sequence exactly.
        sites = [
            (51.5, 359.86, 45.130667),
            (41.9, 12.49, 53.756489),
            (33.94, 18.43, 76.349680),
        ] * 2 + [
            (22.9, 316.77, 87.907733),
            (25.78, 279.78, 101.416373),
        ] * 2 + [
            (28.717, 77.3, 60.060569),
            (3.133, 101.7, 105.920333),
            (9.05, 38.7, 50.162000),
        ] * 2
        for lat, lon, expected in sites:
            self.assertAlmostEqual(
                models.itu453.map_wet_term_radio_refractivity(lat, lon).value,
                expected, places=5)
class ITUR453_13TestCase(test.TestCase):
    """Validation values for ITU-R P.453-13 (wet term radio refractivity,
    with an exceedance-probability argument of 50%)."""

    def setUp(self):
        # Pin the model to version 13 of the recommendation.
        models.itu453.change_version(13)

    def test_wet_term_radio_refractivity(self):
        # (latitude, longitude, expected N_wet) at 50% exceedance.
        sites = [
            (3.133, 101.7, 128.14080027),
            (22.9, -43.23, 104.35847467),
            (23, 30, 36.47166667),
            (25.78, -80.22, 113.2738672),
            (28.717, 77.3, 75.66013547),
            (33.94, 18.43, 80.14015964),
            (41.9, 12.49, 61.21890044),
            (51.5, -0.14, 50.38926222),
        ]
        for lat, lon, expected in sites:
            self.assertAlmostEqual(
                models.itu453.map_wet_term_radio_refractivity(
                    lat, lon, 50).value,
                expected, places=5)
class ITUR676_9TestCase(test.TestCase):
    """Validation tests for Recommendation ITU-R P.676-9 (gaseous attenuation).

    Expected values come from the ITU-R validation examples. P.676-9 uses
    the water-vapour density maps from Recommendation ITU-R P.836-4, so
    both versions are pinned in ``setUp``.
    """

    def setUp(self):
        # Pin the recommendation versions under test.
        models.itu676.change_version(9)
        models.itu836.change_version(4)

    def test_gammaw(self):
        """Approximate specific attenuation due to water vapour (dB/km)."""
        # NOTE(review): the ITU validation data appears to use 273 K rather
        # than 273.15 K for the Celsius<->Kelvin conversion, hence the
        # -0.15 offset applied to the temperature below.
        rho = 4.98154290000          # water-vapour density [g/m^3]
        T = (5.9435147000 - 0.15) * u.deg_C
        # (frequency [GHz], expected gamma_w [dB/km])
        cases = [
            (12, 0.00705700000),
            (20, 0.06742720000),
            (60, 0.11538020000),
            (90, 0.25568340000),
            (130, 0.56358380000),
        ]
        for f, expected in cases:
            with self.subTest(f=f):
                self.assertAlmostEqual(
                    models.itu676.gammaw_approx(f, 1013.25, rho, T).value,
                    expected, places=5)

    def test_gamma0(self):
        """Approximate specific attenuation due to dry air (dB/km).

        The original version of this test repeated each identical
        assertion up to four times; each unique (frequency, temperature,
        expected) combination is asserted exactly once here.
        """
        # Same -0.15 K offset as in test_gammaw (see note there).
        # (frequency [GHz], temperature [K], expected gamma_0 [dB/km])
        cases = [
            (14.25, 282.724, 0.00941327),
            (14.25, 287.4834667, 0.00898682),
            (14.25, 293.1487022, 0.00851359),
            (29, 282.724, 0.02043748),
            (29, 287.4834667, 0.01954568),
            (29, 293.1487022, 0.01856193),
            (14.25, 296.602, 0.00824203),
            (14.25, 296.7208533, 0.0082329),
            (29, 296.602, 0.01800011),
            (29, 296.7208533, 0.01798125),
            (14.25, 299.0966578, 0.00805331),
            (14.25, 297.9322267, 0.00814064),
            (14.25, 287.444, 0.00899025),
            (29, 299.0966578, 0.01761077),
            (29, 297.9322267, 0.01779083),
            (29, 287.444, 0.01955282),
        ]
        for f, T, expected in cases:
            with self.subTest(f=f, T=T):
                self.assertAlmostEqual(
                    models.itu676.gamma0_approx(
                        f, 1013.25, 7.5, T - 0.15).value,
                    expected, places=5)

    # NOTE(review): a large block of commented-out tests for
    # zenit_water_vapour_attenuation (disabled dead code) was removed here.
    # Recover it from version control if those tests are re-enabled.
class ITUR676_11TestCase(test.TestCase):
def setUp(self):
    """Pin the recommendation versions under test.

    P.676-11 relies on the water-vapour maps from P.836-6 and the
    topographic data from P.1511-1, so all three are pinned together.
    """
    models.itu676.change_version(11)
    models.itu836.change_version(6)
    models.itu1511.change_version(1)
def test_gammaw_exact(self):
    """Exact (line-by-line) specific attenuation due to water vapour.

    Checks gamma_w at 1013.25 hPa, 7.5 g/m^3 and 288.15 K against the
    ITU validation values for several frequencies.
    """
    # frequency [GHz] -> expected gamma_w [dB/km]
    expected = {
        12: 0.00953539,
        20: 0.09704730,
        60: 0.15484184,
        90: 0.34197339,
        130: 0.75184470,
    }
    for f, gamma in expected.items():
        with self.subTest(f=f):
            self.assertAlmostEqual(
                models.itu676.gammaw_exact(f, 1013.25, 7.5, 288.15).value,
                gamma, places=5)
def test_gamma0_exact(self):
    """Exact (line-by-line) specific attenuation due to dry air.

    Checks gamma_0 at 1013.25 hPa, 7.5 g/m^3 and 288.15 K against the
    ITU validation values; the 60 GHz value sits inside the oxygen
    absorption complex, hence the much larger expected attenuation.
    """
    # frequency [GHz] -> expected gamma_0 [dB/km]
    expected = {
        12: 0.00869826,
        20: 0.01188355,
        60: 14.62347480,
        90: 0.03886971,
        130: 0.04150908,
    }
    for f, gamma in expected.items():
        with self.subTest(f=f):
            self.assertAlmostEqual(
                models.itu676.gamma0_exact(f, 1013.25, 7.5, 288.15).value,
                gamma, places=5)
def test_gammaw_approx(self):
# The ITU models are non-sense and believe that the conversion between
# Kelvin is 273 instead of 273.15
self.assertAlmostEqual(
models.itu676.gammaw_approx(1, 1013.25, 7.5, 288.15).value,
5.06e-05, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(2, 1013.25, 7.5, 288.15).value,
0.000203124, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(3, 1013.25, 7.5, 288.15).value,
0.000459962, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(4, 1013.25, 7.5, 288.15).value,
0.000825295, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(5, 1013.25, 7.5, 288.15).value,
0.001305574, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(6, 1013.25, 7.5, 288.15).value,
0.001910194, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(7, 1013.25, 7.5, 288.15).value,
0.00265257, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(8, 1013.25, 7.5, 288.15).value,
0.00355178, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(9, 1013.25, 7.5, 288.15).value,
0.00463511, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(10, 1013.25, 7.5, 288.15).value,
0.005942065, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(11, 1013.25, 7.5, 288.15).value,
0.007530789, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(12, 1013.25, 7.5, 288.15).value,
0.009488627, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(13, 1013.25, 7.5, 288.15).value,
0.01194992, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(14, 1013.25, 7.5, 288.15).value,
0.015126834, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(15, 1013.25, 7.5, 288.15).value,
0.019364141, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(16, 1013.25, 7.5, 288.15).value,
0.025238305, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(17, 1013.25, 7.5, 288.15).value,
0.033736014, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(18, 1013.25, 7.5, 288.15).value,
0.04655406, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(19, 1013.25, 7.5, 288.15).value,
0.066459485, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(20, 1013.25, 7.5, 288.15).value,
0.096940958, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(21, 1013.25, 7.5, 288.15).value,
0.137887422, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(22, 1013.25, 7.5, 288.15).value,
0.17418431, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(23, 1013.25, 7.5, 288.15).value,
0.180393135, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(24, 1013.25, 7.5, 288.15).value,
0.15839854, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(25, 1013.25, 7.5, 288.15).value,
0.130540688, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(26, 1013.25, 7.5, 288.15).value,
0.108338372, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(27, 1013.25, 7.5, 288.15).value,
0.092962551, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(28, 1013.25, 7.5, 288.15).value,
0.082791566, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(29, 1013.25, 7.5, 288.15).value,
0.076209755, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(30, 1013.25, 7.5, 288.15).value,
0.072073391, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(31, 1013.25, 7.5, 288.15).value,
0.069632181, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(32, 1013.25, 7.5, 288.15).value,
0.06839841, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(33, 1013.25, 7.5, 288.15).value,
0.068050819, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(34, 1013.25, 7.5, 288.15).value,
0.068373336, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(35, 1013.25, 7.5, 288.15).value,
0.069217296, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(36, 1013.25, 7.5, 288.15).value,
0.070478105, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(37, 1013.25, 7.5, 288.15).value,
0.072080617, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(38, 1013.25, 7.5, 288.15).value,
0.073969796, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(39, 1013.25, 7.5, 288.15).value,
0.076104615, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(40, 1013.25, 7.5, 288.15).value,
0.078454003, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(41, 1013.25, 7.5, 288.15).value,
0.080994086, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(42, 1013.25, 7.5, 288.15).value,
0.08370628, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(43, 1013.25, 7.5, 288.15).value,
0.086575946, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(44, 1013.25, 7.5, 288.15).value,
0.089591433, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(45, 1013.25, 7.5, 288.15).value,
0.092743375, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(46, 1013.25, 7.5, 288.15).value,
0.096024183, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(47, 1013.25, 7.5, 288.15).value,
0.099427654, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(48, 1013.25, 7.5, 288.15).value,
0.102948692, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(49, 1013.25, 7.5, 288.15).value,
0.106583076, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(50, 1013.25, 7.5, 288.15).value,
0.110327299, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(51, 1013.25, 7.5, 288.15).value,
0.11417843, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(52, 1013.25, 7.5, 288.15).value,
0.118134012, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(53, 1013.25, 7.5, 288.15).value,
0.122191981, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(54, 1013.25, 7.5, 288.15).value,
0.126350598, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(55, 1013.25, 7.5, 288.15).value,
0.130608397, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(56, 1013.25, 7.5, 288.15).value,
0.134964144, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(57, 1013.25, 7.5, 288.15).value,
0.139416798, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(58, 1013.25, 7.5, 288.15).value,
0.143965489, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(59, 1013.25, 7.5, 288.15).value,
0.148609489, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(60, 1013.25, 7.5, 288.15).value,
0.153348196, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(61, 1013.25, 7.5, 288.15).value,
0.158181114, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(62, 1013.25, 7.5, 288.15).value,
0.163107847, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(63, 1013.25, 7.5, 288.15).value,
0.168128079, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(64, 1013.25, 7.5, 288.15).value,
0.173241572, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(65, 1013.25, 7.5, 288.15).value,
0.178448154, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(66, 1013.25, 7.5, 288.15).value,
0.183747712, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(67, 1013.25, 7.5, 288.15).value,
0.18914019, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(68, 1013.25, 7.5, 288.15).value,
0.194625582, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(69, 1013.25, 7.5, 288.15).value,
0.200203926, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(70, 1013.25, 7.5, 288.15).value,
0.205875306, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(71, 1013.25, 7.5, 288.15).value,
0.211639845, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(72, 1013.25, 7.5, 288.15).value,
0.217497702, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(73, 1013.25, 7.5, 288.15).value,
0.223449076, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(74, 1013.25, 7.5, 288.15).value,
0.229494196, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(75, 1013.25, 7.5, 288.15).value,
0.235633329, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(76, 1013.25, 7.5, 288.15).value,
0.241866771, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(77, 1013.25, 7.5, 288.15).value,
0.248194851, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(78, 1013.25, 7.5, 288.15).value,
0.254617931, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(79, 1013.25, 7.5, 288.15).value,
0.261136401, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(80, 1013.25, 7.5, 288.15).value,
0.267750686, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(81, 1013.25, 7.5, 288.15).value,
0.274461239, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(82, 1013.25, 7.5, 288.15).value,
0.281268547, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(83, 1013.25, 7.5, 288.15).value,
0.28817313, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(84, 1013.25, 7.5, 288.15).value,
0.295175539, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(85, 1013.25, 7.5, 288.15).value,
0.302276362, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(86, 1013.25, 7.5, 288.15).value,
0.309476219, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(87, 1013.25, 7.5, 288.15).value,
0.316775769, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(88, 1013.25, 7.5, 288.15).value,
0.324175708, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(89, 1013.25, 7.5, 288.15).value,
0.331676772, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(90, 1013.25, 7.5, 288.15).value,
0.339279738, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(91, 1013.25, 7.5, 288.15).value,
0.346985426, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(92, 1013.25, 7.5, 288.15).value,
0.354794703, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(93, 1013.25, 7.5, 288.15).value,
0.362708483, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(94, 1013.25, 7.5, 288.15).value,
0.370727732, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(95, 1013.25, 7.5, 288.15).value,
0.378853468, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(96, 1013.25, 7.5, 288.15).value,
0.387086768, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(97, 1013.25, 7.5, 288.15).value,
0.395428769, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(98, 1013.25, 7.5, 288.15).value,
0.403880673, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(99, 1013.25, 7.5, 288.15).value,
0.412443748, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(100, 1013.25, 7.5, 288.15).value,
0.421119341, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(101, 1013.25, 7.5, 288.15).value,
0.429908872, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(102, 1013.25, 7.5, 288.15).value,
0.438813848, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(103, 1013.25, 7.5, 288.15).value,
0.447835866, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(104, 1013.25, 7.5, 288.15).value,
0.456976619, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(105, 1013.25, 7.5, 288.15).value,
0.466237905, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(106, 1013.25, 7.5, 288.15).value,
0.475621633, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(107, 1013.25, 7.5, 288.15).value,
0.485129833, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(108, 1013.25, 7.5, 288.15).value,
0.494764666, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(109, 1013.25, 7.5, 288.15).value,
0.504528432, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(110, 1013.25, 7.5, 288.15).value,
0.514423584, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(111, 1013.25, 7.5, 288.15).value,
0.524452741, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(112, 1013.25, 7.5, 288.15).value,
0.5346187, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(113, 1013.25, 7.5, 288.15).value,
0.54492445, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(114, 1013.25, 7.5, 288.15).value,
0.555373195, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(115, 1013.25, 7.5, 288.15).value,
0.565968366, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(116, 1013.25, 7.5, 288.15).value,
0.576713646, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(117, 1013.25, 7.5, 288.15).value,
0.58761299, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(118, 1013.25, 7.5, 288.15).value,
0.598670654, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(119, 1013.25, 7.5, 288.15).value,
0.609891221, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(120, 1013.25, 7.5, 288.15).value,
0.621279631, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(121, 1013.25, 7.5, 288.15).value,
0.63284122, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(122, 1013.25, 7.5, 288.15).value,
0.644581758, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(123, 1013.25, 7.5, 288.15).value,
0.656507491, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(124, 1013.25, 7.5, 288.15).value,
0.668625191, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(125, 1013.25, 7.5, 288.15).value,
0.680942215, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(126, 1013.25, 7.5, 288.15).value,
0.69346656, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(127, 1013.25, 7.5, 288.15).value,
0.70620694, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(128, 1013.25, 7.5, 288.15).value,
0.719172861, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(129, 1013.25, 7.5, 288.15).value,
0.73237471, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(130, 1013.25, 7.5, 288.15).value,
0.745823861, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(131, 1013.25, 7.5, 288.15).value,
0.759532783, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(132, 1013.25, 7.5, 288.15).value,
0.773515178, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(133, 1013.25, 7.5, 288.15).value,
0.787786128, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(134, 1013.25, 7.5, 288.15).value,
0.802362262, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(135, 1013.25, 7.5, 288.15).value,
0.817261961, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(136, 1013.25, 7.5, 288.15).value,
0.832505575, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(137, 1013.25, 7.5, 288.15).value,
0.848115693, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(138, 1013.25, 7.5, 288.15).value,
0.864117433, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(139, 1013.25, 7.5, 288.15).value,
0.880538802, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(140, 1013.25, 7.5, 288.15).value,
0.897411097, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(141, 1013.25, 7.5, 288.15).value,
0.914769381, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(142, 1013.25, 7.5, 288.15).value,
0.93265304, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(143, 1013.25, 7.5, 288.15).value,
0.951106434, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(144, 1013.25, 7.5, 288.15).value,
0.970179674, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(145, 1013.25, 7.5, 288.15).value,
0.989929528, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(146, 1013.25, 7.5, 288.15).value,
1.010420514, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(147, 1013.25, 7.5, 288.15).value,
1.03172619, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(148, 1013.25, 7.5, 288.15).value,
1.053930717, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(149, 1013.25, 7.5, 288.15).value,
1.077130727, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(150, 1013.25, 7.5, 288.15).value,
1.101437596, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(151, 1013.25, 7.5, 288.15).value,
1.126980206, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(152, 1013.25, 7.5, 288.15).value,
1.153908327, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(153, 1013.25, 7.5, 288.15).value,
1.182396776, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(154, 1013.25, 7.5, 288.15).value,
1.212650574, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(155, 1013.25, 7.5, 288.15).value,
1.244911365, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(156, 1013.25, 7.5, 288.15).value,
1.279465482, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(157, 1013.25, 7.5, 288.15).value,
1.316654129, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(158, 1013.25, 7.5, 288.15).value,
1.356886363, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(159, 1013.25, 7.5, 288.15).value,
1.400655759, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(160, 1013.25, 7.5, 288.15).value,
1.448562004, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(161, 1013.25, 7.5, 288.15).value,
1.501339131, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(162, 1013.25, 7.5, 288.15).value,
1.559892824, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(163, 1013.25, 7.5, 288.15).value,
1.625350216, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(164, 1013.25, 7.5, 288.15).value,
1.699127159, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(165, 1013.25, 7.5, 288.15).value,
1.783020212, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(166, 1013.25, 7.5, 288.15).value,
1.87933414, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(167, 1013.25, 7.5, 288.15).value,
1.991061177, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(168, 1013.25, 7.5, 288.15).value,
2.122137016, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(169, 1013.25, 7.5, 288.15).value,
2.277812508, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(170, 1013.25, 7.5, 288.15).value,
2.465203115, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(171, 1013.25, 7.5, 288.15).value,
2.694116678, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(172, 1013.25, 7.5, 288.15).value,
2.978325696, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(173, 1013.25, 7.5, 288.15).value,
3.337563176, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(174, 1013.25, 7.5, 288.15).value,
3.80071648, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(175, 1013.25, 7.5, 288.15).value,
4.411026238, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(176, 1013.25, 7.5, 288.15).value,
5.23462829, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(177, 1013.25, 7.5, 288.15).value,
6.374446918, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(178, 1013.25, 7.5, 288.15).value,
7.991434174, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(179, 1013.25, 7.5, 288.15).value,
10.33006475, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(180, 1013.25, 7.5, 288.15).value,
13.71659631, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(181, 1013.25, 7.5, 288.15).value,
18.39188186, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(182, 1013.25, 7.5, 288.15).value,
23.83194406, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(183, 1013.25, 7.5, 288.15).value,
27.67449812, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(184, 1013.25, 7.5, 288.15).value,
27.03213321, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(185, 1013.25, 7.5, 288.15).value,
22.60135009, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(186, 1013.25, 7.5, 288.15).value,
17.47071693, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(187, 1013.25, 7.5, 288.15).value,
13.32603388, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(188, 1013.25, 7.5, 288.15).value,
10.35715037, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(189, 1013.25, 7.5, 288.15).value,
8.290514155, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(190, 1013.25, 7.5, 288.15).value,
6.842342894, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(191, 1013.25, 7.5, 288.15).value,
5.808188543, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(192, 1013.25, 7.5, 288.15).value,
5.053346248, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(193, 1013.25, 7.5, 288.15).value,
4.490555513, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(194, 1013.25, 7.5, 288.15).value,
4.062799398, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(195, 1013.25, 7.5, 288.15).value,
3.73214099, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(196, 1013.25, 7.5, 288.15).value,
3.472798768, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(197, 1013.25, 7.5, 288.15).value,
3.266872193, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(198, 1013.25, 7.5, 288.15).value,
3.101674612, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(199, 1013.25, 7.5, 288.15).value,
2.968040373, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(200, 1013.25, 7.5, 288.15).value,
2.859229328, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(201, 1013.25, 7.5, 288.15).value,
2.77020383, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(202, 1013.25, 7.5, 288.15).value,
2.697142323, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(203, 1013.25, 7.5, 288.15).value,
2.637106098, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(204, 1013.25, 7.5, 288.15).value,
2.587807034, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(205, 1013.25, 7.5, 288.15).value,
2.547443114, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(206, 1013.25, 7.5, 288.15).value,
2.514580199, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(207, 1013.25, 7.5, 288.15).value,
2.488065853, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(208, 1013.25, 7.5, 288.15).value,
2.466965732, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(209, 1013.25, 7.5, 288.15).value,
2.450516051, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(210, 1013.25, 7.5, 288.15).value,
2.43808768, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(211, 1013.25, 7.5, 288.15).value,
2.429158726, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(212, 1013.25, 7.5, 288.15).value,
2.423293411, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(213, 1013.25, 7.5, 288.15).value,
2.420125627, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(214, 1013.25, 7.5, 288.15).value,
2.419346051, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(215, 1013.25, 7.5, 288.15).value,
2.420691947, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(216, 1013.25, 7.5, 288.15).value,
2.423939044, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(217, 1013.25, 7.5, 288.15).value,
2.428895021, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(218, 1013.25, 7.5, 288.15).value,
2.435394244, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(219, 1013.25, 7.5, 288.15).value,
2.443293492, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(220, 1013.25, 7.5, 288.15).value,
2.452468459, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(221, 1013.25, 7.5, 288.15).value,
2.462810881, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(222, 1013.25, 7.5, 288.15).value,
2.474226165, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(223, 1013.25, 7.5, 288.15).value,
2.486631411, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(224, 1013.25, 7.5, 288.15).value,
2.499953772, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(225, 1013.25, 7.5, 288.15).value,
2.514129072, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(226, 1013.25, 7.5, 288.15).value,
2.529100646, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(227, 1013.25, 7.5, 288.15).value,
2.544818361, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(228, 1013.25, 7.5, 288.15).value,
2.561237781, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(229, 1013.25, 7.5, 288.15).value,
2.578319459, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(230, 1013.25, 7.5, 288.15).value,
2.596028333, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(231, 1013.25, 7.5, 288.15).value,
2.614333207, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(232, 1013.25, 7.5, 288.15).value,
2.633206307, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(233, 1013.25, 7.5, 288.15).value,
2.652622892, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(234, 1013.25, 7.5, 288.15).value,
2.672560927, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(235, 1013.25, 7.5, 288.15).value,
2.693000794, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(236, 1013.25, 7.5, 288.15).value,
2.713925039, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(237, 1013.25, 7.5, 288.15).value,
2.735318161, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(238, 1013.25, 7.5, 288.15).value,
2.757166418, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(239, 1013.25, 7.5, 288.15).value,
2.779457666, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(240, 1013.25, 7.5, 288.15).value,
2.80218121, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(241, 1013.25, 7.5, 288.15).value,
2.825327683, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(242, 1013.25, 7.5, 288.15).value,
2.848888936, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(243, 1013.25, 7.5, 288.15).value,
2.87285794, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(244, 1013.25, 7.5, 288.15).value,
2.897228704, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(245, 1013.25, 7.5, 288.15).value,
2.921996202, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(246, 1013.25, 7.5, 288.15).value,
2.947156311, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(247, 1013.25, 7.5, 288.15).value,
2.972705756, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(248, 1013.25, 7.5, 288.15).value,
2.998642066, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(249, 1013.25, 7.5, 288.15).value,
3.024963531, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(250, 1013.25, 7.5, 288.15).value,
3.051669175, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(251, 1013.25, 7.5, 288.15).value,
3.078758722, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(252, 1013.25, 7.5, 288.15).value,
3.106232585, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(253, 1013.25, 7.5, 288.15).value,
3.13409184, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(254, 1013.25, 7.5, 288.15).value,
3.162338224, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(255, 1013.25, 7.5, 288.15).value,
3.190974123, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(256, 1013.25, 7.5, 288.15).value,
3.220002576, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(257, 1013.25, 7.5, 288.15).value,
3.249427276, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(258, 1013.25, 7.5, 288.15).value,
3.27925258, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(259, 1013.25, 7.5, 288.15).value,
3.309483522, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(260, 1013.25, 7.5, 288.15).value,
3.340125831, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(261, 1013.25, 7.5, 288.15).value,
3.371185956, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(262, 1013.25, 7.5, 288.15).value,
3.402671096, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(263, 1013.25, 7.5, 288.15).value,
3.434589233, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(264, 1013.25, 7.5, 288.15).value,
3.466949175, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(265, 1013.25, 7.5, 288.15).value,
3.499760606, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(266, 1013.25, 7.5, 288.15).value,
3.533034141, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(267, 1013.25, 7.5, 288.15).value,
3.566781392, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(268, 1013.25, 7.5, 288.15).value,
3.601015043, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(269, 1013.25, 7.5, 288.15).value,
3.635748934, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(270, 1013.25, 7.5, 288.15).value,
3.670998161, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(271, 1013.25, 7.5, 288.15).value,
3.706779184, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(272, 1013.25, 7.5, 288.15).value,
3.743109957, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(273, 1013.25, 7.5, 288.15).value,
3.780010072, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(274, 1013.25, 7.5, 288.15).value,
3.817500924, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(275, 1013.25, 7.5, 288.15).value,
3.855605898, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(276, 1013.25, 7.5, 288.15).value,
3.894350591, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(277, 1013.25, 7.5, 288.15).value,
3.933763053, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(278, 1013.25, 7.5, 288.15).value,
3.97387408, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(279, 1013.25, 7.5, 288.15).value,
4.014717535, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(280, 1013.25, 7.5, 288.15).value,
4.056330735, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(281, 1013.25, 7.5, 288.15).value,
4.098754887, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(282, 1013.25, 7.5, 288.15).value,
4.142035602, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(283, 1013.25, 7.5, 288.15).value,
4.186223487, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(284, 1013.25, 7.5, 288.15).value,
4.231374849, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(285, 1013.25, 7.5, 288.15).value,
4.277552506, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(286, 1013.25, 7.5, 288.15).value,
4.324826757, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(287, 1013.25, 7.5, 288.15).value,
4.373276518, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(288, 1013.25, 7.5, 288.15).value,
4.422990681, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(289, 1013.25, 7.5, 288.15).value,
4.474069728, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(290, 1013.25, 7.5, 288.15).value,
4.526627666, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(291, 1013.25, 7.5, 288.15).value,
4.580794366, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(292, 1013.25, 7.5, 288.15).value,
4.63671838, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(293, 1013.25, 7.5, 288.15).value,
4.694570386, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(294, 1013.25, 7.5, 288.15).value,
4.754547391, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(295, 1013.25, 7.5, 288.15).value,
4.816877916, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(296, 1013.25, 7.5, 288.15).value,
4.881828419, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(297, 1013.25, 7.5, 288.15).value,
4.949711312, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(298, 1013.25, 7.5, 288.15).value,
5.020895036, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(299, 1013.25, 7.5, 288.15).value,
5.095816817, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(300, 1013.25, 7.5, 288.15).value,
5.174998967, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(301, 1013.25, 7.5, 288.15).value,
5.259069863, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(302, 1013.25, 7.5, 288.15).value,
5.348791238, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(303, 1013.25, 7.5, 288.15).value,
5.445094008, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(304, 1013.25, 7.5, 288.15).value,
5.549125837, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(305, 1013.25, 7.5, 288.15).value,
5.662315008, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(306, 1013.25, 7.5, 288.15).value,
5.786457272, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(307, 1013.25, 7.5, 288.15).value,
5.923835584, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(308, 1013.25, 7.5, 288.15).value,
6.077387588, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(309, 1013.25, 7.5, 288.15).value,
6.250943769, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(310, 1013.25, 7.5, 288.15).value,
6.449572043, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(311, 1013.25, 7.5, 288.15).value,
6.6800861, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(312, 1013.25, 7.5, 288.15).value,
6.951811263, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(313, 1013.25, 7.5, 288.15).value,
7.277764947, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(314, 1013.25, 7.5, 288.15).value,
7.676520947, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(315, 1013.25, 7.5, 288.15).value,
8.175226571, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(316, 1013.25, 7.5, 288.15).value,
8.814587905, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(317, 1013.25, 7.5, 288.15).value,
9.657150573, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(318, 1013.25, 7.5, 288.15).value,
10.80040832, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(319, 1013.25, 7.5, 288.15).value,
12.39287203, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(320, 1013.25, 7.5, 288.15).value,
14.63291434, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(321, 1013.25, 7.5, 288.15).value,
17.71848802, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(322, 1013.25, 7.5, 288.15).value,
21.89833011, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(323, 1013.25, 7.5, 288.15).value,
27.52921207, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(324, 1013.25, 7.5, 288.15).value,
33.93584273, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(325, 1013.25, 7.5, 288.15).value,
37.82487596, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(326, 1013.25, 7.5, 288.15).value,
35.8615979, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(327, 1013.25, 7.5, 288.15).value,
29.89188489, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(328, 1013.25, 7.5, 288.15).value,
23.80724266, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(329, 1013.25, 7.5, 288.15).value,
19.19466647, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(330, 1013.25, 7.5, 288.15).value,
16.01196137, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(331, 1013.25, 7.5, 288.15).value,
13.85529573, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(332, 1013.25, 7.5, 288.15).value,
12.38126427, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(333, 1013.25, 7.5, 288.15).value,
11.35803945, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(334, 1013.25, 7.5, 288.15).value,
10.63752623, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(335, 1013.25, 7.5, 288.15).value,
10.12544, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(336, 1013.25, 7.5, 288.15).value,
9.760866789, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(337, 1013.25, 7.5, 288.15).value,
9.503637932, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(338, 1013.25, 7.5, 288.15).value,
9.326698304, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(339, 1013.25, 7.5, 288.15).value,
9.211467317, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(340, 1013.25, 7.5, 288.15).value,
9.144973785, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(341, 1013.25, 7.5, 288.15).value,
9.118051719, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(342, 1013.25, 7.5, 288.15).value,
9.124181675, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(343, 1013.25, 7.5, 288.15).value,
9.158733184, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(344, 1013.25, 7.5, 288.15).value,
9.218462001, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(345, 1013.25, 7.5, 288.15).value,
9.301173194, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(346, 1013.25, 7.5, 288.15).value,
9.405494946, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(347, 1013.25, 7.5, 288.15).value,
9.53072851, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(348, 1013.25, 7.5, 288.15).value,
9.676752463, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(349, 1013.25, 7.5, 288.15).value,
9.843967537, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(350, 1013.25, 7.5, 288.15).value,
10.03327368, places=5)
def test_gamma0_approx(self):
self.assertAlmostEqual(
models.itu676.gamma0_approx(1, 1013.25, 7.5, 288.15).value,
0.005388658, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(2, 1013.25, 7.5, 288.15).value,
0.006716038, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(3, 1013.25, 7.5, 288.15).value,
0.00707596, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(4, 1013.25, 7.5, 288.15).value,
0.007258969, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(5, 1013.25, 7.5, 288.15).value,
0.007400426, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(6, 1013.25, 7.5, 288.15).value,
0.007537212, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(7, 1013.25, 7.5, 288.15).value,
0.007682905, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(8, 1013.25, 7.5, 288.15).value,
0.007843794, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(9, 1013.25, 7.5, 288.15).value,
0.008023466, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(10, 1013.25, 7.5, 288.15).value,
0.008224416, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(11, 1013.25, 7.5, 288.15).value,
0.008448705, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(12, 1013.25, 7.5, 288.15).value,
0.008698263, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(13, 1013.25, 7.5, 288.15).value,
0.008975056, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(14, 1013.25, 7.5, 288.15).value,
0.009281177, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(15, 1013.25, 7.5, 288.15).value,
0.009618923, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(16, 1013.25, 7.5, 288.15).value,
0.009990845, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(17, 1013.25, 7.5, 288.15).value,
0.010399811, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(18, 1013.25, 7.5, 288.15).value,
0.010849054, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(19, 1013.25, 7.5, 288.15).value,
0.011342243, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(20, 1013.25, 7.5, 288.15).value,
0.011883547, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(21, 1013.25, 7.5, 288.15).value,
0.012477725, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(22, 1013.25, 7.5, 288.15).value,
0.013130219, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(23, 1013.25, 7.5, 288.15).value,
0.013847273, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(24, 1013.25, 7.5, 288.15).value,
0.014636078, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(25, 1013.25, 7.5, 288.15).value,
0.015504937, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(26, 1013.25, 7.5, 288.15).value,
0.016463481, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(27, 1013.25, 7.5, 288.15).value,
0.017522921, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(28, 1013.25, 7.5, 288.15).value,
0.018696367, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(29, 1013.25, 7.5, 288.15).value,
0.019999221, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(30, 1013.25, 7.5, 288.15).value,
0.021449673, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(31, 1013.25, 7.5, 288.15).value,
0.023069328, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(32, 1013.25, 7.5, 288.15).value,
0.024883993, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(33, 1013.25, 7.5, 288.15).value,
0.026924712, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(34, 1013.25, 7.5, 288.15).value,
0.029229084, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(35, 1013.25, 7.5, 288.15).value,
0.031843013, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(36, 1013.25, 7.5, 288.15).value,
0.034823023, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(37, 1013.25, 7.5, 288.15).value,
0.038239374, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(38, 1013.25, 7.5, 288.15).value,
0.042180317, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(39, 1013.25, 7.5, 288.15).value,
0.046757999, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(40, 1013.25, 7.5, 288.15).value,
0.052116797, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(41, 1013.25, 7.5, 288.15).value,
0.058445339, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(42, 1013.25, 7.5, 288.15).value,
0.065994232, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(43, 1013.25, 7.5, 288.15).value,
0.075102941, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(44, 1013.25, 7.5, 288.15).value,
0.086241846, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(45, 1013.25, 7.5, 288.15).value,
0.100080659, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(46, 1013.25, 7.5, 288.15).value,
0.117605188, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(47, 1013.25, 7.5, 288.15).value,
0.140329657, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(48, 1013.25, 7.5, 288.15).value,
0.170719546, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(49, 1013.25, 7.5, 288.15).value,
0.213175063, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(50, 1013.25, 7.5, 288.15).value,
0.277268297, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(51, 1013.25, 7.5, 288.15).value,
0.389670239, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(52, 1013.25, 7.5, 288.15).value,
0.618429331, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(53, 1013.25, 7.5, 288.15).value,
1.126611463, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(54, 1013.25, 7.5, 288.15).value,
2.211541194, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(55, 1013.25, 7.5, 288.15).value,
4.193281287, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(56, 1013.25, 7.5, 288.15).value,
7.055044748, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(57, 1013.25, 7.5, 288.15).value,
10.0652395, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(58, 1013.25, 7.5, 288.15).value,
12.35314971, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(59, 1013.25, 7.5, 288.15).value,
13.63529754, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(60, 1013.25, 7.5, 288.15).value,
14.62347701, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(61, 1013.25, 7.5, 288.15).value,
15.00716194, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(62, 1013.25, 7.5, 288.15).value,
13.99621411, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(63, 1013.25, 7.5, 288.15).value,
10.83108919, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(64, 1013.25, 7.5, 288.15).value,
6.844588337, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(65, 1013.25, 7.5, 288.15).value,
3.80880229, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(66, 1013.25, 7.5, 288.15).value,
1.966616477, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(67, 1013.25, 7.5, 288.15).value,
1.033387448, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(68, 1013.25, 7.5, 288.15).value,
0.60546544, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(69, 1013.25, 7.5, 288.15).value,
0.406984877, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(70, 1013.25, 7.5, 288.15).value,
0.304104518, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(71, 1013.25, 7.5, 288.15).value,
0.24160024, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(72, 1013.25, 7.5, 288.15).value,
0.198531458, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(73, 1013.25, 7.5, 288.15).value,
0.167045465, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(74, 1013.25, 7.5, 288.15).value,
0.143141978, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(75, 1013.25, 7.5, 288.15).value,
0.124484922, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(76, 1013.25, 7.5, 288.15).value,
0.109604088, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(77, 1013.25, 7.5, 288.15).value,
0.09752563, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(78, 1013.25, 7.5, 288.15).value,
0.087579095, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(79, 1013.25, 7.5, 288.15).value,
0.079288509, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(80, 1013.25, 7.5, 288.15).value,
0.072307337, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(81, 1013.25, 7.5, 288.15).value,
0.066377906, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(82, 1013.25, 7.5, 288.15).value,
0.061305161, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(83, 1013.25, 7.5, 288.15).value,
0.05693918, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(84, 1013.25, 7.5, 288.15).value,
0.053163238, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(85, 1013.25, 7.5, 288.15).value,
0.049885471, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(86, 1013.25, 7.5, 288.15).value,
0.047032946, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(87, 1013.25, 7.5, 288.15).value,
0.044547391, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(88, 1013.25, 7.5, 288.15).value,
0.042382069, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(89, 1013.25, 7.5, 288.15).value,
0.040499471, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(90, 1013.25, 7.5, 288.15).value,
0.038869622, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(91, 1013.25, 7.5, 288.15).value,
0.037468818, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(92, 1013.25, 7.5, 288.15).value,
0.036278727, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(93, 1013.25, 7.5, 288.15).value,
0.035285753, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(94, 1013.25, 7.5, 288.15).value,
0.034480646, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(95, 1013.25, 7.5, 288.15).value,
0.033858315, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(96, 1013.25, 7.5, 288.15).value,
0.033417859, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(97, 1013.25, 7.5, 288.15).value,
0.033162815, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(98, 1013.25, 7.5, 288.15).value,
0.033101677, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(99, 1013.25, 7.5, 288.15).value,
0.033248738, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(100, 1013.25, 7.5, 288.15).value,
0.033625377, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(101, 1013.25, 7.5, 288.15).value,
0.034261951, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(102, 1013.25, 7.5, 288.15).value,
0.03520058, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(103, 1013.25, 7.5, 288.15).value,
0.036499225, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(104, 1013.25, 7.5, 288.15).value,
0.038237778, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(105, 1013.25, 7.5, 288.15).value,
0.040527282, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(106, 1013.25, 7.5, 288.15).value,
0.043524209, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(107, 1013.25, 7.5, 288.15).value,
0.047453183, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(108, 1013.25, 7.5, 288.15).value,
0.05264422, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(109, 1013.25, 7.5, 288.15).value,
0.059596011, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(110, 1013.25, 7.5, 288.15).value,
0.069087844, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(111, 1013.25, 7.5, 288.15).value,
0.082387054, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(112, 1013.25, 7.5, 288.15).value,
0.101654574, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(113, 1013.25, 7.5, 288.15).value,
0.130786962, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(114, 1013.25, 7.5, 288.15).value,
0.177281273, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(115, 1013.25, 7.5, 288.15).value,
0.25660834, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(116, 1013.25, 7.5, 288.15).value,
0.402453591, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(117, 1013.25, 7.5, 288.15).value,
0.683016431, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(118, 1013.25, 7.5, 288.15).value,
1.134866447, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(119, 1013.25, 7.5, 288.15).value,
1.306379447, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(120, 1013.25, 7.5, 288.15).value,
0.886108944, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(121, 1013.25, 7.5, 288.15).value,
0.509171816, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(122, 1013.25, 7.5, 288.15).value,
0.307768488, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(123, 1013.25, 7.5, 288.15).value,
0.202100995, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(124, 1013.25, 7.5, 288.15).value,
0.142570138, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(125, 1013.25, 7.5, 288.15).value,
0.106445548, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(126, 1013.25, 7.5, 288.15).value,
0.083103974, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(127, 1013.25, 7.5, 288.15).value,
0.067232038, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(128, 1013.25, 7.5, 288.15).value,
0.055982184, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(129, 1013.25, 7.5, 288.15).value,
0.047732332, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(130, 1013.25, 7.5, 288.15).value,
0.041509034, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(131, 1013.25, 7.5, 288.15).value,
0.036701642, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(132, 1013.25, 7.5, 288.15).value,
0.032912343, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(133, 1013.25, 7.5, 288.15).value,
0.029873422, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(134, 1013.25, 7.5, 288.15).value,
0.027399581, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(135, 1013.25, 7.5, 288.15).value,
0.025359375, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(136, 1013.25, 7.5, 288.15).value,
0.02365753, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(137, 1013.25, 7.5, 288.15).value,
0.022223652, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(138, 1013.25, 7.5, 288.15).value,
0.021004834, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(139, 1013.25, 7.5, 288.15).value,
0.019960701, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(140, 1013.25, 7.5, 288.15).value,
0.019060012, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(141, 1013.25, 7.5, 288.15).value,
0.018278288, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(142, 1013.25, 7.5, 288.15).value,
0.017596128, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(143, 1013.25, 7.5, 288.15).value,
0.016997997, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(144, 1013.25, 7.5, 288.15).value,
0.016471333, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(145, 1013.25, 7.5, 288.15).value,
0.016005887, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(146, 1013.25, 7.5, 288.15).value,
0.015593233, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(147, 1013.25, 7.5, 288.15).value,
0.015226386, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(148, 1013.25, 7.5, 288.15).value,
0.014899516, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(149, 1013.25, 7.5, 288.15).value,
0.014607727, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(150, 1013.25, 7.5, 288.15).value,
0.01434688, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(151, 1013.25, 7.5, 288.15).value,
0.014113455, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(152, 1013.25, 7.5, 288.15).value,
0.013904444, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(153, 1013.25, 7.5, 288.15).value,
0.013717263, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(154, 1013.25, 7.5, 288.15).value,
0.013549678, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(155, 1013.25, 7.5, 288.15).value,
0.013399755, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(156, 1013.25, 7.5, 288.15).value,
0.013265808, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(157, 1013.25, 7.5, 288.15).value,
0.013146362, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(158, 1013.25, 7.5, 288.15).value,
0.013040122, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(159, 1013.25, 7.5, 288.15).value,
0.012945947, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(160, 1013.25, 7.5, 288.15).value,
0.012862828, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(161, 1013.25, 7.5, 288.15).value,
0.012789869, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(162, 1013.25, 7.5, 288.15).value,
0.012726273, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(163, 1013.25, 7.5, 288.15).value,
0.012671328, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(164, 1013.25, 7.5, 288.15).value,
0.012624397, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(165, 1013.25, 7.5, 288.15).value,
0.012584907, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(166, 1013.25, 7.5, 288.15).value,
0.012552343, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(167, 1013.25, 7.5, 288.15).value,
0.012526238, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(168, 1013.25, 7.5, 288.15).value,
0.012506174, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(169, 1013.25, 7.5, 288.15).value,
0.012491766, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(170, 1013.25, 7.5, 288.15).value,
0.012482668, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(171, 1013.25, 7.5, 288.15).value,
0.012478563, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(172, 1013.25, 7.5, 288.15).value,
0.012479162, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(173, 1013.25, 7.5, 288.15).value,
0.012484201, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(174, 1013.25, 7.5, 288.15).value,
0.012493438, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(175, 1013.25, 7.5, 288.15).value,
0.01250665, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(176, 1013.25, 7.5, 288.15).value,
0.012523632, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(177, 1013.25, 7.5, 288.15).value,
0.012544196, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(178, 1013.25, 7.5, 288.15).value,
0.012568166, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(179, 1013.25, 7.5, 288.15).value,
0.012595383, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(180, 1013.25, 7.5, 288.15).value,
0.012625696, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(181, 1013.25, 7.5, 288.15).value,
0.012658968, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(182, 1013.25, 7.5, 288.15).value,
0.01269507, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(183, 1013.25, 7.5, 288.15).value,
0.012733882, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(184, 1013.25, 7.5, 288.15).value,
0.012775293, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(185, 1013.25, 7.5, 288.15).value,
0.012819198, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(186, 1013.25, 7.5, 288.15).value,
0.012865502, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(187, 1013.25, 7.5, 288.15).value,
0.012914112, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(188, 1013.25, 7.5, 288.15).value,
0.012964945, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(189, 1013.25, 7.5, 288.15).value,
0.01301792, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(190, 1013.25, 7.5, 288.15).value,
0.013072963, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(191, 1013.25, 7.5, 288.15).value,
0.013130004, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(192, 1013.25, 7.5, 288.15).value,
0.013188976, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(193, 1013.25, 7.5, 288.15).value,
0.013249818, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(194, 1013.25, 7.5, 288.15).value,
0.013312471, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(195, 1013.25, 7.5, 288.15).value,
0.01337688, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(196, 1013.25, 7.5, 288.15).value,
0.013442993, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(197, 1013.25, 7.5, 288.15).value,
0.01351076, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(198, 1013.25, 7.5, 288.15).value,
0.013580135, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(199, 1013.25, 7.5, 288.15).value,
0.013651074, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(200, 1013.25, 7.5, 288.15).value,
0.013723536, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(201, 1013.25, 7.5, 288.15).value,
0.01379748, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(202, 1013.25, 7.5, 288.15).value,
0.013872869, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(203, 1013.25, 7.5, 288.15).value,
0.013949668, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(204, 1013.25, 7.5, 288.15).value,
0.014027843, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(205, 1013.25, 7.5, 288.15).value,
0.014107361, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(206, 1013.25, 7.5, 288.15).value,
0.014188192, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(207, 1013.25, 7.5, 288.15).value,
0.014270307, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(208, 1013.25, 7.5, 288.15).value,
0.014353678, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(209, 1013.25, 7.5, 288.15).value,
0.014438278, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(210, 1013.25, 7.5, 288.15).value,
0.014524083, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(211, 1013.25, 7.5, 288.15).value,
0.014611069, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(212, 1013.25, 7.5, 288.15).value,
0.014699212, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(213, 1013.25, 7.5, 288.15).value,
0.01478849, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(214, 1013.25, 7.5, 288.15).value,
0.014878883, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(215, 1013.25, 7.5, 288.15).value,
0.01497037, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(216, 1013.25, 7.5, 288.15).value,
0.015062932, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(217, 1013.25, 7.5, 288.15).value,
0.015156551, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(218, 1013.25, 7.5, 288.15).value,
0.01525121, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(219, 1013.25, 7.5, 288.15).value,
0.015346891, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(220, 1013.25, 7.5, 288.15).value,
0.015443579, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(221, 1013.25, 7.5, 288.15).value,
0.015541258, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(222, 1013.25, 7.5, 288.15).value,
0.015639912, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(223, 1013.25, 7.5, 288.15).value,
0.015739529, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(224, 1013.25, 7.5, 288.15).value,
0.015840094, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(225, 1013.25, 7.5, 288.15).value,
0.015941595, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(226, 1013.25, 7.5, 288.15).value,
0.016044018, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(227, 1013.25, 7.5, 288.15).value,
0.016147352, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(228, 1013.25, 7.5, 288.15).value,
0.016251585, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(229, 1013.25, 7.5, 288.15).value,
0.016356706, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(230, 1013.25, 7.5, 288.15).value,
0.016462705, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(231, 1013.25, 7.5, 288.15).value,
0.016569571, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(232, 1013.25, 7.5, 288.15).value,
0.016677295, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(233, 1013.25, 7.5, 288.15).value,
0.016785866, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(234, 1013.25, 7.5, 288.15).value,
0.016895277, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(235, 1013.25, 7.5, 288.15).value,
0.017005518, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(236, 1013.25, 7.5, 288.15).value,
0.017116581, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(237, 1013.25, 7.5, 288.15).value,
0.017228459, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(238, 1013.25, 7.5, 288.15).value,
0.017341142, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(239, 1013.25, 7.5, 288.15).value,
0.017454625, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(240, 1013.25, 7.5, 288.15).value,
0.0175689, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(241, 1013.25, 7.5, 288.15).value,
0.01768396, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(242, 1013.25, 7.5, 288.15).value,
0.017799799, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(243, 1013.25, 7.5, 288.15).value,
0.017916411, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(244, 1013.25, 7.5, 288.15).value,
0.018033789, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(245, 1013.25, 7.5, 288.15).value,
0.018151929, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(246, 1013.25, 7.5, 288.15).value,
0.018270824, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(247, 1013.25, 7.5, 288.15).value,
0.01839047, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(248, 1013.25, 7.5, 288.15).value,
0.018510862, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(249, 1013.25, 7.5, 288.15).value,
0.018631995, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(250, 1013.25, 7.5, 288.15).value,
0.018753865, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(251, 1013.25, 7.5, 288.15).value,
0.018876467, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(252, 1013.25, 7.5, 288.15).value,
0.018999798, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(253, 1013.25, 7.5, 288.15).value,
0.019123854, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(254, 1013.25, 7.5, 288.15).value,
0.019248631, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(255, 1013.25, 7.5, 288.15).value,
0.019374127, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(256, 1013.25, 7.5, 288.15).value,
0.019500338, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(257, 1013.25, 7.5, 288.15).value,
0.019627261, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(258, 1013.25, 7.5, 288.15).value,
0.019754894, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(259, 1013.25, 7.5, 288.15).value,
0.019883235, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(260, 1013.25, 7.5, 288.15).value,
0.02001228, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(261, 1013.25, 7.5, 288.15).value,
0.020142029, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(262, 1013.25, 7.5, 288.15).value,
0.02027248, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(263, 1013.25, 7.5, 288.15).value,
0.020403631, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(264, 1013.25, 7.5, 288.15).value,
0.020535481, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(265, 1013.25, 7.5, 288.15).value,
0.020668028, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(266, 1013.25, 7.5, 288.15).value,
0.020801273, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(267, 1013.25, 7.5, 288.15).value,
0.020935214, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(268, 1013.25, 7.5, 288.15).value,
0.021069851, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(269, 1013.25, 7.5, 288.15).value,
0.021205184, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(270, 1013.25, 7.5, 288.15).value,
0.021341213, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(271, 1013.25, 7.5, 288.15).value,
0.021477939, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(272, 1013.25, 7.5, 288.15).value,
0.021615362, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(273, 1013.25, 7.5, 288.15).value,
0.021753483, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(274, 1013.25, 7.5, 288.15).value,
0.021892304, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(275, 1013.25, 7.5, 288.15).value,
0.022031825, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(276, 1013.25, 7.5, 288.15).value,
0.02217205, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(277, 1013.25, 7.5, 288.15).value,
0.022312979, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(278, 1013.25, 7.5, 288.15).value,
0.022454615, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(279, 1013.25, 7.5, 288.15).value,
0.022596961, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(280, 1013.25, 7.5, 288.15).value,
0.022740019, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(281, 1013.25, 7.5, 288.15).value,
0.022883795, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(282, 1013.25, 7.5, 288.15).value,
0.02302829, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(283, 1013.25, 7.5, 288.15).value,
0.02317351, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(284, 1013.25, 7.5, 288.15).value,
0.023319459, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(285, 1013.25, 7.5, 288.15).value,
0.023466142, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(286, 1013.25, 7.5, 288.15).value,
0.023613565, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(287, 1013.25, 7.5, 288.15).value,
0.023761733, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(288, 1013.25, 7.5, 288.15).value,
0.023910653, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(289, 1013.25, 7.5, 288.15).value,
0.024060332, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(290, 1013.25, 7.5, 288.15).value,
0.024210778, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(291, 1013.25, 7.5, 288.15).value,
0.024361999, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(292, 1013.25, 7.5, 288.15).value,
0.024514003, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(293, 1013.25, 7.5, 288.15).value,
0.024666801, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(294, 1013.25, 7.5, 288.15).value,
0.024820402, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(295, 1013.25, 7.5, 288.15).value,
0.024974817, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(296, 1013.25, 7.5, 288.15).value,
0.025130058, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(297, 1013.25, 7.5, 288.15).value,
0.025286138, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(298, 1013.25, 7.5, 288.15).value,
0.025443071, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(299, 1013.25, 7.5, 288.15).value,
0.025600871, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(300, 1013.25, 7.5, 288.15).value,
0.025759555, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(301, 1013.25, 7.5, 288.15).value,
0.025919138, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(302, 1013.25, 7.5, 288.15).value,
0.026079639, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(303, 1013.25, 7.5, 288.15).value,
0.026241079, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(304, 1013.25, 7.5, 288.15).value,
0.026403477, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(305, 1013.25, 7.5, 288.15).value,
0.026566857, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(306, 1013.25, 7.5, 288.15).value,
0.026731244, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(307, 1013.25, 7.5, 288.15).value,
0.026896663, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(308, 1013.25, 7.5, 288.15).value,
0.027063143, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(309, 1013.25, 7.5, 288.15).value,
0.027230715, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(310, 1013.25, 7.5, 288.15).value,
0.027399412, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(311, 1013.25, 7.5, 288.15).value,
0.02756927, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(312, 1013.25, 7.5, 288.15).value,
0.027740328, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(313, 1013.25, 7.5, 288.15).value,
0.027912629, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(314, 1013.25, 7.5, 288.15).value,
0.028086218, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(315, 1013.25, 7.5, 288.15).value,
0.028261145, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(316, 1013.25, 7.5, 288.15).value,
0.028437464, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(317, 1013.25, 7.5, 288.15).value,
0.028615235, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(318, 1013.25, 7.5, 288.15).value,
0.028794523, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(319, 1013.25, 7.5, 288.15).value,
0.028975399, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(320, 1013.25, 7.5, 288.15).value,
0.029157939, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(321, 1013.25, 7.5, 288.15).value,
0.029342231, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(322, 1013.25, 7.5, 288.15).value,
0.029528367, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(323, 1013.25, 7.5, 288.15).value,
0.029716451, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(324, 1013.25, 7.5, 288.15).value,
0.029906599, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(325, 1013.25, 7.5, 288.15).value,
0.030098937, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(326, 1013.25, 7.5, 288.15).value,
0.030293607, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(327, 1013.25, 7.5, 288.15).value,
0.030490765, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(328, 1013.25, 7.5, 288.15).value,
0.030690588, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(329, 1013.25, 7.5, 288.15).value,
0.030893273, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(330, 1013.25, 7.5, 288.15).value,
0.031099041, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(331, 1013.25, 7.5, 288.15).value,
0.031308143, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(332, 1013.25, 7.5, 288.15).value,
0.031520859, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(333, 1013.25, 7.5, 288.15).value,
0.031737512, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(334, 1013.25, 7.5, 288.15).value,
0.031958467, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(335, 1013.25, 7.5, 288.15).value,
0.032184144, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(336, 1013.25, 7.5, 288.15).value,
0.032415022, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(337, 1013.25, 7.5, 288.15).value,
0.032651659, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(338, 1013.25, 7.5, 288.15).value,
0.032894698, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(339, 1013.25, 7.5, 288.15).value,
0.033144893, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(340, 1013.25, 7.5, 288.15).value,
0.033403124, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(341, 1013.25, 7.5, 288.15).value,
0.033670433, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(342, 1013.25, 7.5, 288.15).value,
0.033948053, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(343, 1013.25, 7.5, 288.15).value,
0.034237459, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(344, 1013.25, 7.5, 288.15).value,
0.034540422, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(345, 1013.25, 7.5, 288.15).value,
0.034859093, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(346, 1013.25, 7.5, 288.15).value,
0.035196097, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(347, 1013.25, 7.5, 288.15).value,
0.03555467, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(348, 1013.25, 7.5, 288.15).value,
0.03593884, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(349, 1013.25, 7.5, 288.15).value,
0.036353672, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(350, 1013.25, 7.5, 288.15).value,
0.036805605, places=5)
def test_zenit_water_vapour_attenuation(self):
    """Validate ``itu676.zenit_water_vapour_attenuation`` against the
    ITU-R P.676 validation examples.

    Each row is ``(lat [deg], lon [deg], p [%], f [GHz], expected [dB])``;
    the expected values come from the official ITU validation tables.
    """
    cases = [
        (51.5, -0.14, 1.0, 14.25, 0.064981043),
        (41.9, 12.49, 1.0, 14.25, 0.070360091),
        (33.94, 18.43, 1.0, 14.25, 0.074660262),
        (51.5, -0.14, 0.5, 14.25, 0.06911297),
        (41.9, 12.49, 0.5, 14.25, 0.073434531),
        (33.94, 18.43, 0.5, 14.25, 0.080098077),
        (51.5, -0.14, 0.3, 14.25, 0.072394726),
        (41.9, 12.49, 0.3, 14.25, 0.075162715),
        (33.94, 18.43, 0.3, 14.25, 0.083750389),
        (51.5, -0.14, 0.2, 14.25, 0.074394064),
        (41.9, 12.49, 0.2, 14.25, 0.076695287),
        (33.94, 18.43, 0.2, 14.25, 0.086350752),
        (51.5, -0.14, 1.0, 29, 0.305636526),
        (41.9, 12.49, 1.0, 29, 0.331425898),
        (33.94, 18.43, 1.0, 29, 0.355205229),
        (51.5, -0.14, 0.5, 29, 0.324977228),
        (41.9, 12.49, 0.5, 29, 0.345830132),
        (33.94, 18.43, 0.5, 29, 0.38091961),
        (51.5, -0.14, 0.3, 29, 0.340327583),
        (41.9, 12.49, 0.3, 29, 0.353923317),
        (33.94, 18.43, 0.3, 29, 0.398176611),
        (51.5, -0.14, 0.2, 29, 0.349674822),
        (41.9, 12.49, 0.2, 29, 0.361098289),
        (33.94, 18.43, 0.2, 29, 0.410456469),
        (22.9, -43.23, 1.0, 14.25, 0.099820608),
        (25.78, -80.22, 1.0, 14.25, 0.118484695),
        (22.9, -43.23, 0.5, 14.25, 0.105446054),
        (25.78, -80.22, 0.5, 14.25, 0.12252307),
        (22.9, -43.23, 0.3, 14.25, 0.108812058),
        (25.78, -80.22, 0.3, 14.25, 0.125093339),
        (22.9, -43.23, 0.2, 14.25, 0.111441086),
        (25.78, -80.22, 0.2, 14.25, 0.127090376),
        (22.9, -43.23, 1.0, 29, 0.473979935),
        (25.78, -80.22, 1.0, 29, 0.561753331),
        (22.9, -43.23, 0.5, 29, 0.500468518),
        (25.78, -80.22, 0.5, 29, 0.580717641),
        (22.9, -43.23, 0.3, 29, 0.516307047),
        (25.78, -80.22, 0.3, 29, 0.592782098),
        (22.9, -43.23, 0.2, 29, 0.528672179),
        (25.78, -80.22, 0.2, 29, 0.602152942),
        (28.717, 77.3, 1.0, 14.25, 0.149156898),
        (3.133, 101.7, 1.0, 14.25, 0.121165007),
        (9.05, 38.7, 1.0, 14.25, 0.051589359),
        (28.717, 77.3, 0.5, 14.25, 0.153859398),
        (3.133, 101.7, 0.5, 14.25, 0.123550552),
        (9.05, 38.7, 0.5, 14.25, 0.052996133),
        (28.717, 77.3, 0.3, 14.25, 0.156616572),
        (3.133, 101.7, 0.3, 14.25, 0.125325192),
        (9.05, 38.7, 0.3, 14.25, 0.053871006),
        (28.717, 77.3, 0.2, 14.25, 0.158958354),
        (3.133, 101.7, 0.2, 14.25, 0.126766365),
        (9.05, 38.7, 0.2, 14.25, 0.054721343),
        (28.717, 77.3, 1.0, 29, 0.683528163),
        (3.133, 101.7, 1.0, 29, 0.555168022),
        (9.05, 38.7, 1.0, 29, 0.188559832),
        (28.717, 77.3, 0.5, 29, 0.704836196),
        (3.133, 101.7, 0.5, 29, 0.565993797),
        (9.05, 38.7, 0.5, 29, 0.193687836),
        (28.717, 77.3, 0.3, 29, 0.717323975),
        (3.133, 101.7, 0.3, 29, 0.574044911),
        (9.05, 38.7, 0.3, 29, 0.19687619),
        (28.717, 77.3, 0.2, 29, 0.727927181),
        (3.133, 101.7, 0.2, 29, 0.580581723),
        (9.05, 38.7, 0.2, 29, 0.19997458),
    ]
    # Same calls, in the same order, as the original expanded assertions.
    for lat, lon, p, f, expected in cases:
        att = models.itu676.zenit_water_vapour_attenuation(lat, lon, p, f)
        self.assertAlmostEqual(att.value, expected, places=5)
class ITUR836_6TestCase(test.TestCase):
    """Validation tests for Recommendation ITU-R P.836-6 (water vapour)."""

    def setUp(self):
        # Pin the recommendation version under test.
        models.itu836.change_version(6)

    def test_surface_water_vapour_density(self):
        """Check surface water-vapour density [g/m^3] against the ITU
        validation examples.

        Rows are ``(lat [deg], lon [deg], p [%], altitude [km], expected)``.
        """
        cases = [
            (3.133, 101.7, 0.1, 0.236104459, 22.93756598),
            (3.133, 101.7, 0.15, 0.236104459, 22.80534575),
            (3.133, 101.7, 0.3, 0.236104459, 22.55507955),
            (3.133, 101.7, 0.35, 0.236104459, 22.49361957),
            (22.9, -43.23, 0.1, 0, 21.59164912),
            (22.9, -43.23, 0.15, 0, 21.46164369),
            (22.9, -43.23, 0.3, 0, 21.24753319),
            (22.9, -43.23, 0.35, 0, 21.18676013),
            (23, 30, 0.1, 0.247, 11.88170822),
            (23, 30, 0.15, 0.247, 11.61777268),
            (23, 30, 0.3, 0.247, 11.12235912),
            (23, 30, 0.35, 0.247, 11.00877052),
            (25.78, -80.22, 0.1, 7.51071e-05, 23.50748104),
            (25.78, -80.22, 0.15, 7.51071e-05, 23.34324475),
            (25.78, -80.22, 0.3, 7.51071e-05, 23.06574222),
            (25.78, -80.22, 0.35, 7.51071e-05, 23.00327243),
            (28.717, 77.3, 0.1, 0.217559455, 25.95287453),
            (28.717, 77.3, 0.15, 0.217559455, 25.71217873),
            (28.717, 77.3, 0.3, 0.217559455, 25.34018758),
            (28.717, 77.3, 0.35, 0.217559455, 25.2557054),
            (33.94, 18.43, 0.1, 0, 24.00156532),
            (33.94, 18.43, 0.15, 0, 23.85987554),
            (33.94, 18.43, 0.3, 0, 23.51464505),
            (33.94, 18.43, 0.35, 0, 23.41954477),
            (41.9, 12.49, 0.1, 0.056701045, 19.78501126),
            (41.9, 12.49, 0.15, 0.056701045, 19.48948848),
            (41.9, 12.49, 0.3, 0.056701045, 19.02450953),
            (41.9, 12.49, 0.35, 0.056701045, 18.92055161),
            (51.5, -0.14, 0.1, 0.069164224, 15.21351315),
            (51.5, -0.14, 0.15, 0.069164224, 15.0172773),
            (51.5, -0.14, 0.3, 0.069164224, 14.6189506),
            (51.5, -0.14, 0.35, 0.069164224, 14.50640729),
        ]
        for lat, lon, p, alt, expected in cases:
            rho = models.itu836.surface_water_vapour_density(lat, lon, p, alt)
            self.assertAlmostEqual(rho.value, expected, places=5)

    def test_total_water_vapour_content(self):
        """Check total columnar water-vapour content [kg/m^2] against the
        ITU validation examples.

        Rows are ``(lat [deg], lon [deg], p [%], altitude [km], expected)``.
        """
        cases = [
            (3.133, 101.7, 0.1, 0.23610446, 62.16532093),
            (3.133, 101.7, 0.15, 0.23610446, 61.59527521),
            (3.133, 101.7, 0.3, 0.23610446, 60.58285243),
            (3.133, 101.7, 0.35, 0.23610446, 60.35619302),
            (22.9, -43.23, 0.1, 0.0, 56.38788554),
            (22.9, -43.23, 0.15, 0.0, 55.36064664),
            (22.9, -43.23, 0.3, 0.0, 53.4851113),
            (22.9, -43.23, 0.35, 0.0, 53.03918259),
            (23, 30, 0.1, 0.247, 38.47288189),
            (23, 30, 0.15, 0.247, 37.21449337),
            (23, 30, 0.3, 0.247, 34.63093178),
            (23, 30, 0.35, 0.247, 34.06569649),
            (25.78, -80.22, 0.1, 7.511e-05, 62.84315177),
            (25.78, -80.22, 0.15, 7.511e-05, 61.95641322),
            (25.78, -80.22, 0.3, 7.511e-05, 60.48487688),
            (25.78, -80.22, 0.35, 7.511e-05, 60.1561742),
            (28.717, 77.3, 0.1, 0.21755946, 75.44891006),
            (28.717, 77.3, 0.15, 0.21755946, 74.79639702),
            (28.717, 77.3, 0.3, 0.21755946, 73.40408393),
            (28.717, 77.3, 0.35, 0.21755946, 73.07234727),
            (33.94, 18.43, 0.1, 0.0, 45.19895208),
            (33.94, 18.43, 0.15, 0.0, 44.15275162),
            (33.94, 18.43, 0.3, 0.0, 42.21022387),
            (33.94, 18.43, 0.35, 0.0, 41.69772633),
            (41.9, 12.49, 0.1, 0.05670104, 39.93693588),
            (41.9, 12.49, 0.15, 0.05670104, 39.33984158),
            (41.9, 12.49, 0.3, 0.05670104, 38.19321515),
            (41.9, 12.49, 0.35, 0.05670104, 37.94621912),
            (51.5, -0.14, 0.1, 0.06916422, 39.23803432),
            (51.5, -0.14, 0.15, 0.06916422, 38.41414987),
            (51.5, -0.14, 0.3, 0.06916422, 36.88058222),
            (51.5, -0.14, 0.35, 0.06916422, 36.4074561),
        ]
        for lat, lon, p, alt, expected in cases:
            V = models.itu836.total_water_vapour_content(lat, lon, p, alt)
            self.assertAlmostEqual(V.value, expected, places=5)
class ITUR838_3TestCase(test.TestCase):
    """Validation tests for Recommendation ITU-R P.838-3
    (rain specific attenuation model).
    """

    def setUp(self):
        # Pin the recommendation version under test.
        models.itu838.change_version(3)

    def test_rain_specific_attenuation(self):
        """Check the rain specific attenuation [dB/km] against the ITU
        validation examples.

        Rows are ``(R [mm/h], f [GHz], elevation [deg], tau [deg],
        expected [dB/km])``.

        NOTE(review): the original auto-generated test repeated every single
        case verbatim four times. ``rain_specific_attenuation`` is a pure,
        deterministic function of its arguments, so each unique case is
        asserted exactly once here; the pass/fail outcome is unchanged.
        """
        cases = [
            (30.875024, 14.25, 30.87067768, 0, 1.879742),
            (56.370009, 14.25, 40.97052773, 0, 3.630988),
            (55.231625, 14.25, 47.91280491, 0, 3.503189),
            (30.875024, 29.00, 30.87067768, 0, 5.814832),
            (56.370009, 29.00, 40.97052773, 0, 10.157375),
            (55.231625, 29.00, 47.91280491, 0, 9.846762),
            (58.094216, 14.25, 59.81487174, 0, 3.628282),
            (89.114103, 14.25, 49.20900369, 0, 5.948478),
            (58.094216, 29.00, 59.81487174, 0, 10.132682),
            (89.114103, 29.00, 49.20900369, 0, 15.460212),
            (57.39623, 14.25, 55.90591362, 0, 3.603569),
            (93.607098, 14.25, 67.76751981, 0, 6.06336),
            (54.623411, 14.25, 38.14104832, 0, 3.523996),
            (57.39623, 29.00, 55.90591362, 0, 10.078266),
            (93.607098, 29.00, 67.76751981, 0, 15.712442),
            (54.623411, 29.00, 38.14104832, 0, 9.904098),
            # New values in validation 4
            (26.48052, 14.25, 31.07694309, 0, 1.581308489),
            (33.936232, 14.25, 40.23202374, 0, 2.06173217),
            (27.13586832, 14.25, 46.35969261, 0, 1.592084199),
            (26.48052, 29, 31.07694309, 0, 5.021802196),
            (33.936232, 29, 40.23202374, 0, 6.278460355),
            (27.13586832, 29, 46.35969261, 0, 5.031354793),
            (50.639304, 14.25, 22.27833468, 0, 3.321396378),
            (78.2994993, 14.25, 52.6789929, 0, 5.11503455),
            (50.639304, 29, 22.27833468, 0, 9.424302438),
            (78.2994993, 29, 52.6789929, 0, 13.59290067),
            (63.62668149, 14.25, 48.23861222, 90, 3.72899602),
            (99.13558978, 14.25, 85.80767474, 90, 6.340652096),
            (42.91007183, 14.25, 20.14348033, 90, 2.350323497),
            (63.62668149, 29, 48.23861222, 90, 10.28694456),
            (99.13558978, 29, 85.80767474, 90, 16.31838263),
            (42.91007183, 29, 20.14348033, 90, 6.833646475),
        ]
        for R, f, el, tau, expected in cases:
            gamma = models.itu838.rain_specific_attenuation(R, f, el, tau)
            self.assertAlmostEqual(gamma.value, expected, places=5)
class ITUR837_6TestCase(test.TestCase):
    """Validation tests for the ITU-R P.837-6 rainfall-rate model.

    Expected values are taken from the ITU-R validation examples for
    Recommendation P.837-6.
    """

    def setUp(self):
        # Pin the recommendation version so results match the P.837-6 tables.
        models.itu837.change_version(6)

    def test_rainfall_rate(self):
        # Each case is (latitude [deg], longitude [deg E], exceedance
        # probability p [%], expected rainfall rate exceeded for p% of an
        # average year [mm/h]).  The original script asserted several of
        # these cases twice verbatim; the duplicates have been collapsed
        # to one entry per unique (lat, lon, p) triple.
        cases = [
            (51.500, 359.86, 0.01, 30.8750240),
            (41.900, 12.49, 0.01, 56.3700090),
            (33.940, 18.43, 0.01, 55.2316250),
            (22.900, 316.77, 0.01, 58.0942160),
            (25.780, 279.78, 0.01, 89.1141030),
            (28.717, 77.30, 0.01, 57.3962300),
            (3.133, 101.70, 0.01, 93.6070980),
            (9.050, 38.70, 0.01, 54.6234110),
        ]
        for lat, lon, p, expected in cases:
            self.assertAlmostEqual(
                models.itu837.rainfall_rate(lat, lon, p).value,
                expected, places=5)
class ITUR837_7TestCase(test.TestCase):
    """Validation tests for the ITU-R P.837-7 rainfall statistics.

    Expected values are taken from the ITU-R validation examples for
    Recommendation P.837-7.  Each table row carries the exact arguments,
    the expected value, and (where applicable) the decimal tolerance of
    the corresponding original assertion.
    """

    def setUp(self):
        # Pin the recommendation version so results match the P.837-7 tables.
        models.itu837.change_version(7)

    def test_rainfall_rate(self):
        # (lat [deg], lon [deg], p [%], expected R_p [mm/h], places).
        # Note the single places=2 tolerance for (22.9, -43.23, 0.3),
        # carried over unchanged from the validation data.
        cases = [
            (3.133, 101.7, 0.1, 34.64798123, 3),
            (3.133, 101.7, 0.15, 27.7636201, 3),
            (3.133, 101.7, 0.3, 18.26254364, 3),
            (3.133, 101.7, 0.35, 16.49493229, 3),
            (22.9, -43.23, 0.1, 14.58963041, 3),
            (22.9, -43.23, 0.15, 11.00510082, 3),
            (22.9, -43.23, 0.3, 6.23796236, 2),
            (22.9, -43.23, 0.35, 5.38239642, 3),
            (23, 30, 0.1, 0.0, 3),
            (23, 30, 0.15, 0.0, 3),
            (23, 30, 0.3, 0.0, 3),
            (23, 30, 0.35, 0.0, 3),
            (25.78, -80.22, 0.1, 25.33888119, 3),
            (25.78, -80.22, 0.15, 19.86683577, 3),
            (25.78, -80.22, 0.3, 12.43676554, 3),
            (25.78, -80.22, 0.35, 11.07566126, 3),
            (28.717, 77.3, 0.1, 16.53857378, 3),
            (28.717, 77.3, 0.15, 12.04651363, 3),
            (28.717, 77.3, 0.3, 6.21600589, 3),
            (28.717, 77.3, 0.35, 5.19609765, 3),
            (33.94, 18.43, 0.1, 7.43193175, 3),
            (33.94, 18.43, 0.15, 5.53031864, 3),
            (33.94, 18.43, 0.3, 3.03506603, 3),
            (33.94, 18.43, 0.35, 2.59276061, 3),
            (41.9, 12.49, 0.1, 11.19798305, 3),
            (41.9, 12.49, 0.15, 8.88472572, 3),
            (41.9, 12.49, 0.3, 5.75356253, 3),
            (41.9, 12.49, 0.35, 5.18058827, 3),
            (51.5, -0.14, 0.1, 8.9924712, 3),
            (51.5, -0.14, 0.15, 7.17369312, 3),
            (51.5, -0.14, 0.3, 4.69033625, 3),
            (51.5, -0.14, 0.35, 4.23258601, 3),
        ]
        for lat, lon, p, expected, places in cases:
            self.assertAlmostEqual(
                models.itu837.rainfall_rate(lat, lon, p).value,
                expected, places=places)

    def test_rainfall_probability(self):
        # (lat [deg], lon [deg], expected probability of rain P0 [%]).
        cases = [
            (3.133, 101.7, 4.53654368),
            (22.9, -43.23, 1.41773353),
            (23, 30, 0.00051911),
            (25.78, -80.22, 2.90785192),
            (28.717, 77.3, 1.07089363),
            (33.94, 18.43, 1.27567391),
            (41.9, 12.49, 5.26971907),
            (51.5, -0.14, 5.3615096),
        ]
        for lat, lon, expected in cases:
            self.assertAlmostEqual(
                models.itu837.rainfall_probability(lat, lon).value,
                expected, places=5)

    def test_rainfall_rate_R001(self):
        # (lat [deg], lon [deg], expected R_0.01 [mm/h]) at the reference
        # 0.01 % exceedance probability.
        cases = [
            (3.133, 101.7, 99.1481136),
            (22.9, -43.23, 50.639304),
            (23.0, 30.0, 0.0),
            (25.78, -80.22, 78.2982928),
            (28.717, 77.3, 63.5972464),
            (33.94, 18.43, 27.1349664),
            (41.9, 12.49, 33.936232),
            (51.5, -0.14, 26.48052),
        ]
        for lat, lon, expected in cases:
            self.assertAlmostEqual(
                models.itu837.rainfall_rate(lat, lon, 0.01).value,
                expected, places=5)
class ITUR839_4TestCase(test.TestCase):
    """Validation tests for ITU-R P.839-4 (0 degC isotherm and rain height).

    Expected values are taken from the ITU-R validation examples for
    Recommendation P.839-4.
    """

    def setUp(self):
        # Pin the recommendation version so results match the P.839-4 tables.
        models.itu839.change_version(4)

    def test_isoterm_0_deg(self):
        # (lat [deg], lon [deg], expected 0 degC isotherm height [km]).
        cases = [
            (3.133, 101.7, 4.5979744),
            (22.9, -43.23, 3.79877867),
            (23, 30, 4.168),
            (25.78, -80.22, 4.20946133),
            (28.717, 77.3, 4.89820404),
            (33.94, 18.43, 2.20330276),
            (41.9, 12.49, 2.68749333),
            (51.5, -0.14, 2.09273333),
        ]
        for lat, lon, expected in cases:
            self.assertAlmostEqual(
                models.itu839.isoterm_0(lat, lon).value,
                expected, places=5)

    def test_rain_height(self):
        # (lat [deg], lon [deg], expected mean annual rain height [km]).
        # The original script repeated eight of these assertions verbatim;
        # the exact duplicates have been removed.  Both entries for
        # (3.133, 101.7) are retained because their expected values differ
        # in the 7th decimal (both pass at places=5).
        cases = [
            (51.500, 359.86, 2.4527330),
            (41.900, 12.49, 3.0474930),
            (33.940, 18.43, 2.5633030),
            (22.900, 316.77, 4.1587790),
            (25.780, 279.78, 4.5694610),
            (28.717, 77.30, 5.2582040),
            (3.133, 101.70, 4.9579740),
            (9.050, 38.70, 4.7839070),
            (3.133, 101.7, 4.9579744),
            (22.9, -43.23, 4.15877867),
            (23, 30, 4.528),
            (25.78, -80.22, 4.56946133),
            (28.717, 77.3, 5.25820404),
            (33.94, 18.43, 2.56330276),
            (41.9, 12.49, 3.04749333),
            (51.5, -0.14, 2.45273333),
        ]
        for lat, lon, expected in cases:
            self.assertAlmostEqual(
                models.itu839.rain_height(lat, lon).value,
                expected, places=5)
class ITUR618_12TestCase(test.TestCase):
def setUp(self):
    # Pin every dependent recommendation to the versions the ITU-R
    # P.618-12 validation examples were generated with.
    pinned_versions = (
        (models.itu618, 12),
        (models.itu453, 12),
        (models.itu838, 3),
        (models.itu836, 5),
        (models.itu837, 6),
        (models.itu840, 6),
    )
    for module, version in pinned_versions:
        module.change_version(version)
def test_rain_cross_polarization_discrimination(self):
    """Validate XPD against the ITU-R P.618-12 validation examples.

    Each case is (rain attenuation Ap [dB], frequency [GHz], elevation
    angle [deg], exceedance probability p [%], polarization tilt tau
    [deg], expected cross-polarization discrimination [dB]).  Assertions
    duplicated verbatim in the original validation script have been
    removed.
    """
    cases = [
        (16.38308757, 14.25, 30.870677680, 0.001, 0, 27.143007980),
        (3.89479806, 14.25, 40.970527730, 0.1, 0, 37.386086000),
        (9.71179484, 14.25, 47.912804910, 0.01, 0, 33.812795580),
        (71.44613350, 29, 40.970527730, 0.001, 0, 21.244470560),
        (12.87478397, 29, 47.912804910, 0.1, 0, 35.166125690),
        (39.07323323, 29, 40.970527730, 0.01, 0, 25.180145740),
        (23.00197384, 14.25, 59.814871740, 0.001, 0, 33.308530550),
        (32.74150676, 14.25, 49.209003690, 0.001, 0, 25.508227320),
        (4.92489694, 14.25, 59.814871740, 0.1, 0, 41.798127850),
        (6.96606559, 14.25, 49.209003690, 0.1, 0, 34.830206060),
        (12.76053997, 14.25, 59.814871740, 0.01, 0, 36.168649690),
        (18.06938866, 14.25, 49.209003690, 0.01, 0, 28.803871260),
        (100.96022257, 29, 49.209003690, 0.001, 0, 20.365001500),
        (20.43214239, 29, 59.814871740, 0.1, 0, 35.581135690),
        (27.86774318, 29, 49.209003690, 0.1, 0, 28.745547830),
        (46.32024457, 29, 59.814871740, 0.01, 0, 30.303830010),
        (63.46384760, 29, 49.209003690, 0.01, 0, 23.046241580),
        (73.05533363, 29, 59.814871740, 0.001, 0, 28.089155910),
        (26.85402570, 14.25, 55.905913620, 0.001, 0, 29.993601830),
        (4.44533923, 14.25, 38.141048320, 0.1, 0, 35.652315760),
        (11.06265445, 14.25, 38.141048320, 0.01, 0, 30.034285750),
        (21.84602116, 29, 55.905913620, 0.1, 0, 33.289964560),
        (51.72271818, 29, 55.905913620, 0.01, 0, 27.480618010),
        (53.61322867, 29, 38.141048320, 0.001, 0, 23.354012700),
    ]
    for ap, f, el, p, tau, expected in cases:
        self.assertAlmostEqual(
            models.itu618.rain_cross_polarization_discrimination(
                ap, f, el, p, tau).value,
            expected, places=5)
def test_rain_attenuation(self):
    """Validate rain attenuation against the ITU-R P.618-12 examples.

    The original script asserted the whole 48-case validation set twice
    verbatim; the duplicate half has been removed.  Each site is
    (latitude [deg], longitude [deg E], elevation angle [deg], station
    height hs [km]); for every exceedance probability p [%] the expected
    attenuation [dB] is tabulated as a pair (value at 14.25 GHz, value
    at 29 GHz).  Polarization tilt tau is 0 deg throughout.
    """
    sites = [
        (51.500, 359.86, 30.87067768, 0.0691640),
        (41.900, 12.49, 40.97052773, 0.0567010),
        (33.940, 18.43, 47.91280491, 0.0000000),
        (22.900, 316.77, 59.81487174, 0.0000000),
        (25.780, 279.78, 49.20900369, 0.0000750),
        (28.717, 77.30, 55.90591362, 0.2175590),
        (3.133, 101.70, 67.76751981, 0.2361040),
        (9.050, 38.70, 38.14104832, 2.4500050),
    ]
    # expected[p][i] pairs with sites[i]: (A @ 14.25 GHz, A @ 29 GHz).
    expected = {
        0.01: [(7.5572640, 25.7166770),
               (11.4735460, 39.0732330),
               (9.7117950, 33.4169840),
               (12.7605400, 46.3202450),
               (18.0693890, 63.4638480),
               (14.1707990, 51.7227180),
               (19.6617050, 70.5396050),
               (11.0626540, 35.1160650)],
        0.10: [(2.4567600, 9.4912070),
               (3.8947980, 15.0594580),
               (3.2920370, 12.8747840),
               (4.9248970, 20.4321420),
               (6.9660660, 27.8677430),
               (5.2338740, 21.8460210),
               (9.6728110, 39.6143120),
               (4.4453390, 15.9048720)],
        1.00: [(0.5628470, 2.4686380),
               (0.9317550, 4.0904280),
               (0.7619040, 3.3867490),
               (1.0593540, 5.0231130),
               (1.6122160, 7.3463010),
               (1.2022670, 5.7386810),
               (1.7852610, 8.3461990),
               (0.8916230, 3.5957140)],
    }
    for p, rows in expected.items():
        for (lat, lon, el, hs), (att_14, att_29) in zip(sites, rows):
            for f, att in ((14.25, att_14), (29.00, att_29)):
                self.assertAlmostEqual(
                    models.itu618.rain_attenuation(
                        lat, lon, f, el, hs=hs, p=p, tau=0.00).value,
                    att, places=5)
def test_scintillation_attenuation(self):
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
51.5, 359.86, 14.25, 30.87067768, 0.001, 0.9, eta=0.6).value,
0.866044, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
41.9, 12.49, 14.25, 40.97052773, 0.001, 0.9, eta=0.6).value,
0.710527, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
33.94, 18.43, 14.25, 47.91280491, 0.001, 0.9, eta=0.6).value,
0.764448, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
51.5, 359.86, 29, 30.87067768, 0.001, 0.9, eta=0.6).value,
1.289482, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
41.9, 12.49, 29, 40.97052773, 0.001, 0.9, eta=0.6).value,
1.054611, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
33.94, 18.43, 29, 47.91280491, 0.001, 0.9, eta=0.6).value,
1.132606, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
22.9, 316.77, 14.25, 59.81487174, 0.001, 0.9, eta=0.6).value,
0.699472, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
25.78, 279.78, 14.25, 49.20900369, 0.001, 0.9, eta=0.6).value,
0.912438, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
22.9, 316.77, 29, 59.81487174, 0.001, 0.9, eta=0.6).value,
1.033819, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
25.78, 279.78, 29, 49.20900369, 0.001, 0.9, eta=0.6).value,
1.351457, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
28.717, 77.3, 14.25, 55.90591362, 0.001, 0.9, eta=0.6).value,
0.571530, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
3.133, 101.7, 14.25, 67.76751981, 0.001, 0.9, eta=0.6).value,
0.736636, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
9.05, 38.7, 14.25, 38.14104832, 0.001, 0.9, eta=0.6).value,
0.733740, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
28.717, 77.3, 29, 55.90591362, 0.001, 0.9, eta=0.6).value,
0.845322, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
3.133, 101.7, 29, 67.76751981, 0.001, 0.9, eta=0.6).value,
1.087468, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
9.05, 38.7, 29, 38.14104832, 0.001, 0.9, eta=0.6).value,
1.089954, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
51.5, 359.86, 14.25, 30.87067768, 0.1, 0.9, eta=0.6).value,
0.402326, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
41.9, 12.49, 14.25, 40.97052773, 0.1, 0.9, eta=0.6).value,
0.330080, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
33.94, 18.43, 14.25, 47.91280491, 0.1, 0.9, eta=0.6).value,
0.355129, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
51.5, 359.86, 29, 30.87067768, 0.1, 0.9, eta=0.6).value,
0.599037, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
41.9, 12.49, 29, 40.97052773, 0.1, 0.9, eta=0.6).value,
0.489926, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
33.94, 18.43, 29, 47.91280491, 0.1, 0.9, eta=0.6).value,
0.526159, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
22.9, 316.77, 14.25, 59.81487174, 0.1, 0.9, eta=0.6).value,
0.324944, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
25.78, 279.78, 14.25, 49.20900369, 0.1, 0.9, eta=0.6).value,
0.423879, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
22.9, 316.77, 29, 59.81487174, 0.1, 0.9, eta=0.6).value,
0.480267, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
25.78, 279.78, 29, 49.20900369, 0.1, 0.9, eta=0.6).value,
0.627828, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
28.717, 77.3, 14.25, 55.90591362, 0.1, 0.9, eta=0.6).value,
0.265508, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
3.133, 101.7, 14.25, 67.76751981, 0.1, 0.9, eta=0.6).value,
0.342209, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
9.05, 38.7, 14.25, 38.14104832, 0.1, 0.9, eta=0.6).value,
0.340864, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
28.717, 77.3, 29, 55.90591362, 0.1, 0.9, eta=0.6).value,
0.392700, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
3.133, 101.7, 29, 67.76751981, 0.1, 0.9, eta=0.6).value,
0.505190, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
9.05, 38.7, 29, 38.14104832, 0.1, 0.9, eta=0.6).value,
0.506345, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
51.5, 359.86, 14.25, 30.87067768, 1, 0.9, eta=0.6).value,
0.249221, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
41.9, 12.49, 14.25, 40.97052773, 1, 0.9, eta=0.6).value,
0.204468, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
33.94, 18.43, 14.25, 47.91280491, 1, 0.9, eta=0.6).value,
0.219985, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
51.5, 359.86, 29, 30.87067768, 1, 0.9, eta=0.6).value,
0.371074, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
41.9, 12.49, 29, 40.97052773, 1, 0.9, eta=0.6).value,
0.303485, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
33.94, 18.43, 29, 47.91280491, 1, 0.9, eta=0.6).value,
0.325930, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
22.9, 316.77, 14.25, 59.81487174, 1, 0.9, eta=0.6).value,
0.201287, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
25.78, 279.78, 14.25, 49.20900369, 1, 0.9, eta=0.6).value,
0.262572, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
22.9, 316.77, 29, 59.81487174, 1, 0.9, eta=0.6).value,
0.297502, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
25.78, 279.78, 29, 49.20900369, 1, 0.9, eta=0.6).value,
0.388909, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
28.717, 77.3, 14.25, 55.90591362, 1, 0.9, eta=0.6).value,
0.164469, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
3.133, 101.7, 14.25, 67.76751981, 1, 0.9, eta=0.6).value,
0.211982, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
9.05, 38.7, 14.25, 38.14104832, 1, 0.9, eta=0.6).value,
0.211148, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
28.717, 77.3, 29, 55.90591362, 1, 0.9, eta=0.6).value,
0.243258, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
3.133, 101.7, 29, 67.76751981, 1, 0.9, eta=0.6).value,
0.312940, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
9.05, 38.7, 29, 38.14104832, 1, 0.9, eta=0.6).value,
0.313656, places=5)
class ITUR618_13TestCase(test.TestCase):
def setUp(self):
models.itu453.change_version(13)
models.itu618.change_version(13)
models.itu676.change_version(11)
models.itu836.change_version(6)
models.itu837.change_version(7)
models.itu838.change_version(3)
models.itu839.change_version(4)
models.itu840.change_version(7)
models.itu1510.change_version(1)
models.itu1511.change_version(1)
def test_rain_attenuation(self):
self.assertAlmostEqual(
models.itu618.rain_attenuation(
51.5, -0.14, 14.25, 31.07694309, p=1.0,
tau=0, R001=26.48052).value,
0.4891464, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
41.9, 12.49, 14.25, 40.23202374, p=1.0,
tau=0, R001=33.936232).value,
0.62159245, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
33.94, 18.43, 14.25, 46.35969261, p=1.0,
tau=0, R001=27.13586832).value,
0.42101702, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
51.5, -0.14, 14.25, 31.07694309, p=0.1,
tau=0, R001=26.48052).value,
2.16093996, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
41.9, 12.49, 14.25, 40.23202374, p=0.1,
tau=0, R001=33.936232).value,
2.69015654, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
33.94, 18.43, 14.25, 46.35969261, p=0.1,
tau=0, R001=27.13586832).value,
1.91338757, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
51.5, -0.14, 14.25, 31.07694309, p=0.01,
tau=0, R001=26.48052).value,
6.72784425, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
41.9, 12.49, 14.25, 40.23202374, p=0.01,
tau=0, R001=33.936232).value,
8.20500328, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
33.94, 18.43, 14.25, 46.35969261, p=0.01,
tau=0, R001=27.13586832).value,
5.9418061, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
51.5, -0.14, 14.25, 31.07694309, p=0.001,
tau=0, R001=26.48052).value,
14.76177358, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
41.9, 12.49, 14.25, 40.23202374, p=0.001,
tau=0, R001=33.936232).value,
17.636376, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
33.94, 18.43, 14.25, 46.35969261, p=0.001,
tau=0, R001=27.13586832).value,
12.98151687, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
51.5, -0.14, 29.0, 31.07694309, p=1.0,
tau=0, R001=26.48052).value,
2.17898357, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
41.9, 12.49, 29.0, 40.23202374, p=1.0,
tau=0, R001=33.936232).value,
2.81537632, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
33.94, 18.43, 29.0, 46.35969261, p=1.0,
tau=0, R001=27.13586832).value,
1.96063611, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
51.5, -0.14, 29.0, 31.07694309, p=0.1,
tau=0, R001=26.48052).value,
8.46779316, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
41.9, 12.49, 29.0, 40.23202374, p=0.1,
tau=0, R001=33.936232).value,
10.70289842, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
33.94, 18.43, 29.0, 46.35969261, p=0.1,
tau=0, R001=27.13586832).value,
7.80832251, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
51.5, -0.14, 29.0, 31.07694309, p=0.01,
tau=0, R001=26.48052).value,
23.1908096, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
41.9, 12.49, 29.0, 40.23202374, p=0.01,
tau=0, R001=33.936232).value,
28.67449232, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
33.94, 18.43, 29.0, 46.35969261, p=0.01,
tau=0, R001=27.13586832).value,
21.24861968, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
51.5, -0.14, 29.0, 31.07694309, p=0.001,
tau=0, R001=26.48052).value,
44.76009125, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
41.9, 12.49, 29.0, 40.23202374, p=0.001,
tau=0, R001=33.936232).value,
54.14015005, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
33.94, 18.43, 29.0, 46.35969261, p=0.001,
tau=0, R001=27.13586832).value,
40.68133015, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
22.9, -43.23, 14.25, 22.27833468, p=1.0,
tau=0, R001=50.639304).value,
1.70690128, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
25.78, -80.22, 14.25, 52.6789929, p=1.0,
tau=0, R001=78.2994993).value,
1.43904149, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
22.9, -43.23, 14.25, 22.27833468, p=0.1,
tau=0, R001=50.639304).value,
8.27164744, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
25.78, -80.22, 14.25, 52.6789929, p=0.1,
tau=0, R001=78.2994993).value,
6.30417186, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
22.9, -43.23, 14.25, 22.27833468, p=0.01,
tau=0, R001=50.639304).value,
18.94410356, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
25.78, -80.22, 14.25, 52.6789929, p=0.01,
tau=0, R001=78.2994993).value,
16.44617644, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
22.9, -43.23, 14.25, 22.27833468, p=0.001,
tau=0, R001=50.639304).value,
29.91171296, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
25.78, -80.22, 14.25, 52.6789929, p=0.001,
tau=0, R001=78.2994993).value,
29.95767701, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
22.9, -43.23, 29.0, 22.27833468, p=1.0,
tau=0, R001=50.639304).value,
6.81336808, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
25.78, -80.22, 29.0, 52.6789929, p=1.0,
tau=0, R001=78.2994993).value,
6.66385625, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
22.9, -43.23, 29.0, 22.27833468, p=0.1,
tau=0, R001=50.639304).value,
29.31896844, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
25.78, -80.22, 29.0, 52.6789929, p=0.1,
tau=0, R001=78.2994993).value,
25.59455941, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
22.9, -43.23, 29.0, 22.27833468, p=0.01,
tau=0, R001=50.639304).value,
59.62576355, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
25.78, -80.22, 29.0, 52.6789929, p=0.01,
tau=0, R001=78.2994993).value,
58.53988572, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
22.9, -43.23, 29.0, 22.27833468, p=0.001,
tau=0, R001=50.639304).value,
83.5996391, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
25.78, -80.22, 29.0, 52.6789929, p=0.001,
tau=0, R001=78.2994993).value,
93.48939944, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
28.717, 77.3, 14.25, 48.24116215, p=1.0,
tau=90, R001=63.61888808).value,
1.2731081, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
3.133, 101.7, 14.25, 85.80457401, p=1.0,
tau=90, R001=99.15117186).value,
1.93713255, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
9.05, 38.7, 14.25, 20.14348033, p=1.0,
tau=90, R001=42.91007183).value,
1.04440572, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
28.717, 77.3, 14.25, 48.24116215, p=0.1,
tau=90, R001=63.61888808).value,
5.48101228, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
3.133, 101.7, 14.25, 85.80457401, p=0.1,
tau=90, R001=99.15117186).value,
10.67987642, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
9.05, 38.7, 14.25, 20.14348033, p=0.1,
tau=90, R001=42.91007183).value,
6.0510347, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
28.717, 77.3, 14.25, 48.24116215, p=0.01,
tau=90, R001=63.61888808).value,
14.85903351, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
3.133, 101.7, 14.25, 85.80457401, p=0.01,
tau=90, R001=99.15117186).value,
21.03740448, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
9.05, 38.7, 14.25, 20.14348033, p=0.01,
tau=90, R001=42.91007183).value,
12.61120361, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
28.717, 77.3, 14.25, 48.24116215, p=0.001,
tau=90, R001=63.61888808).value,
28.21372983, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
3.133, 101.7, 14.25, 85.80457401, p=0.001,
tau=90, R001=99.15117186).value,
28.13337932, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
9.05, 38.7, 14.25, 20.14348033, p=0.001,
tau=90, R001=42.91007183).value,
17.85045772, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
28.717, 77.3, 29.0, 48.24116215, p=1.0,
tau=90, R001=63.61888808).value,
5.88085649, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
3.133, 101.7, 29.0, 85.80457401, p=1.0,
tau=90, R001=99.15117186).value,
9.84052929, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
9.05, 38.7, 29.0, 20.14348033, p=1.0,
tau=90, R001=42.91007183).value,
3.8213237, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
28.717, 77.3, 29.0, 48.24116215, p=0.1,
tau=90, R001=63.61888808).value,
22.20219047, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
3.133, 101.7, 29.0, 85.80457401, p=0.1,
tau=90, R001=99.15117186).value,
47.18910296, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
9.05, 38.7, 29.0, 20.14348033, p=0.1,
tau=90, R001=42.91007183).value,
19.80717661, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
28.717, 77.3, 29.0, 48.24116215, p=0.01,
tau=90, R001=63.61888808).value,
52.7819415, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
3.133, 101.7, 29.0, 85.80457401, p=0.01,
tau=90, R001=99.15117186).value,
80.85074503, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
9.05, 38.7, 29.0, 20.14348033, p=0.01,
tau=90, R001=42.91007183).value,
36.93157357, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
28.717, 77.3, 29.0, 48.24116215, p=0.001,
tau=90, R001=63.61888808).value,
87.88505965, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
3.133, 101.7, 29.0, 85.80457401, p=0.001,
tau=90, R001=99.15117186).value,
94.0437949, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
9.05, 38.7, 29.0, 20.14348033, p=0.001,
tau=90, R001=42.91007183).value,
46.76694226, places=5)
def test_probability_of_rain_attenuation(self):
self.assertAlmostEqual(
models.itu618.rain_attenuation_probability(
51.5, -0.14, 31.07694309).value,
7.32466089, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation_probability(
41.9, 12.49, 40.23202374).value,
7.08992377, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation_probability(
33.94, 18.43, 46.35969261).value,
1.74467895, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation_probability(
22.9, -43.23, 22.27833468).value,
2.5828985, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation_probability(
25.78, -80.22, 52.6789929).value,
4.0392312, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation_probability(
28.717, 77.3, 48.24116215).value,
1.64420965, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation_probability(
3.133, 101.7, 85.80457401).value,
5.00075505, places=4)
self.assertAlmostEqual(
models.itu618.rain_attenuation_probability(
9.05, 38.7, 20.14348033).value,
7.0357202, places=5)
    # TODO(review): test_site_diversity is disabled (commented out below).
    # Re-enable it once the expected site_diversity_rain_outage_probability
    # values are confirmed for the current model versions, or remove the
    # dead block to keep the suite clean.
    # def test_site_diversity(self):
# self.assertAlmostEqual(
# models.itu618.site_diversity_rain_outage_probability(
# 25.768, -80.205, 9, 52.40999326,
# 25.463, -80.486, 9, 52.48526958, 14.5, tau=0).value,
# 0.00098637, places=5)
# self.assertAlmostEqual(
# models.itu618.site_diversity_rain_outage_probability(
# 25.768, -80.205, 9, 52.40999326,
# 25.463, -80.486, 3, 52.48526958, 14.5, tau=0).value,
# 0.0049444, places=5)
# self.assertAlmostEqual(
# models.itu618.site_diversity_rain_outage_probability(
# 25.768, -80.205, 3, 52.40999326,
# 25.463, -80.486, 9, 52.48526958, 14.5, tau=0).value,
# 0.00503721, places=5)
# self.assertAlmostEqual(
# models.itu618.site_diversity_rain_outage_probability(
# 25.768, -80.205, 9, 52.40999326,
# 25.463, -80.486, 9, 52.48526958, 18, tau=0).value,
# 0.00513052, places=5)
# self.assertAlmostEqual(
# models.itu618.site_diversity_rain_outage_probability(
# 25.768, -80.205, 9, 52.40999326,
# 25.463, -80.486, 3, 52.48526958, 18, tau=0).value,
# 0.01982845, places=5)
# self.assertAlmostEqual(
# models.itu618.site_diversity_rain_outage_probability(
# 25.768, -80.205, 3, 52.40999326,
# 25.463, -80.486, 9, 52.48526958, 18, tau=0).value,
# 0.02027952, places=5)
# self.assertAlmostEqual(
# models.itu618.site_diversity_rain_outage_probability(
# 25.768, -80.205, 9, 52.40999326,
# 25.463, -80.486, 9, 52.48526958, 29, tau=0).value,
# 0.07543135, places=5)
# self.assertAlmostEqual(
# models.itu618.site_diversity_rain_outage_probability(
# 25.768, -80.205, 9, 52.40999326,
# 25.463, -80.486, 3, 52.48526958, 29, tau=0).value,
# 0.16564191, places=5)
# self.assertAlmostEqual(
# models.itu618.site_diversity_rain_outage_probability(
# 25.768, -80.205, 3, 52.40999326,
# 25.463, -80.486, 9, 52.48526958, 29, tau=0).value,
# 0.17005653, places=5)
# self.assertAlmostEqual(
# models.itu618.site_diversity_rain_outage_probability(
# 25.796, -80.287, 9, 52.33141826,
# 25.889, -80.278, 9, 52.25682688, 29, tau=0).value,
# 0.25228844, places=5)
# self.assertAlmostEqual(
# models.itu618.site_diversity_rain_outage_probability(
# 25.796, -80.287, 9, 52.33141826,
# 25.889, -80.278, 3, 52.25682688, 29, tau=0).value,
# 0.40360211, places=5)
# self.assertAlmostEqual(
# models.itu618.site_diversity_rain_outage_probability(
# 25.796, -80.287, 3, 52.33141826,
# 25.889, -80.278, 9, 52.25682688, 29, tau=0).value,
# 0.39740505, places=5)
def test_scintillation_attenuation(self):
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
51.5, -0.14, 14.25, 31.07694309, 1, 1, 0.65).value,
0.26193234, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
41.9, 12.49, 14.25, 40.23202374, 1, 1, 0.65).value,
0.22405226, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
33.94, 18.43, 14.25, 46.35969261, 1, 1, 0.65).value,
0.23279942, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
51.5, -0.14, 14.25, 31.07694309, 0.1, 1, 0.65).value,
0.4228461, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
41.9, 12.49, 14.25, 40.23202374, 0.1, 1, 0.65).value,
0.36169504, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
33.94, 18.43, 14.25, 46.35969261, 0.1, 1, 0.65).value,
0.37581586, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
51.5, -0.14, 14.25, 31.07694309, 0.01, 1, 0.65).value,
0.62828836, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
41.9, 12.49, 14.25, 40.23202374, 0.01, 1, 0.65).value,
0.5374267, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
33.94, 18.43, 14.25, 46.35969261, 0.01, 1, 0.65).value,
0.55840821, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
51.5, -0.14, 14.25, 31.07694309, 0.001, 1, 0.65).value,
0.91021486, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
41.9, 12.49, 14.25, 40.23202374, 0.001, 1, 0.65).value,
0.77858162, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
33.94, 18.43, 14.25, 46.35969261, 0.001, 1, 0.65).value,
0.80897798, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
51.5, -0.14, 29, 31.07694309, 1, 1, 0.65).value,
0.38849319, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
41.9, 12.49, 29, 40.23202374, 1, 1, 0.65).value,
0.33115269, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
33.94, 18.43, 29, 46.35969261, 1, 1, 0.65).value,
0.34339899, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
51.5, -0.14, 29, 31.07694309, 0.1, 1, 0.65).value,
0.62715751, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
41.9, 12.49, 29, 40.23202374, 0.1, 1, 0.65).value,
0.53459083, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
33.94, 18.43, 29, 46.35969261, 0.1, 1, 0.65).value,
0.55436043, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
51.5, -0.14, 29, 31.07694309, 0.01, 1, 0.65).value,
0.93186567, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
41.9, 12.49, 29, 40.23202374, 0.01, 1, 0.65).value,
0.79432493, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
33.94, 18.43, 29, 46.35969261, 0.01, 1, 0.65).value,
0.82369971, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
51.5, -0.14, 29, 31.07694309, 0.001, 1, 0.65).value,
1.35001384, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
41.9, 12.49, 29, 40.23202374, 0.001, 1, 0.65).value,
1.15075561, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
33.94, 18.43, 29, 46.35969261, 0.001, 1, 0.65).value,
1.19331148, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
22.9, -43.23, 14.25, 22.27833468, 1, 1, 0.65).value,
0.62009744, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
25.78, -80.22, 14.25, 52.6789929, 1, 1, 0.65).value,
0.2664749, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
22.9, -43.23, 14.25, 22.27833468, 0.1, 1, 0.65).value,
1.00104396, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
25.78, -80.22, 14.25, 52.6789929, 0.1, 1, 0.65).value,
0.43017931, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
22.9, -43.23, 14.25, 22.27833468, 0.01, 1, 0.65).value,
1.48740705, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
25.78, -80.22, 14.25, 52.6789929, 0.01, 1, 0.65).value,
0.63918446, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
22.9, -43.23, 14.25, 22.27833468, 0.001, 1, 0.65).value,
2.15483859, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
25.78, -80.22, 14.25, 52.6789929, 0.001, 1, 0.65).value,
0.92600027, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
22.9, -43.23, 29, 22.27833468, 1, 1, 0.65).value,
0.92341029, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
25.78, -80.22, 29, 52.6789929, 1, 1, 0.65).value,
0.39237999, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
22.9, -43.23, 29, 22.27833468, 0.1, 1, 0.65).value,
1.49069201, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
25.78, -80.22, 29, 52.6789929, 0.1, 1, 0.65).value,
0.63343209, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
22.9, -43.23, 29, 22.27833468, 0.01, 1, 0.65).value,
2.21495349, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
25.78, -80.22, 29, 52.6789929, 0.01, 1, 0.65).value,
0.9411888, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
22.9, -43.23, 29, 22.27833468, 0.001, 1, 0.65).value,
3.20885076, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
25.78, -80.22, 29, 52.6789929, 0.001, 1, 0.65).value,
1.36352046, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
28.717, 77.3, 14.25, 48.24116215, 1, 1, 0.65).value,
0.2156413, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
3.133, 101.7, 14.25, 85.80457401, 1, 1, 0.65).value,
0.22167129, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
9.05, 38.7, 14.25, 20.14348033, 1, 1, 0.65).value,
0.48533645, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
28.717, 77.3, 14.25, 48.24116215, 0.1, 1, 0.65).value,
0.34811693, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
3.133, 101.7, 14.25, 85.80457401, 0.1, 1, 0.65).value,
0.35785136, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
9.05, 38.7, 14.25, 20.14348033, 0.1, 1, 0.65).value,
0.78349481, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
28.717, 77.3, 14.25, 48.24116215, 0.01, 1, 0.65).value,
0.51725159, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
3.133, 101.7, 14.25, 85.80457401, 0.01, 1, 0.65).value,
0.53171554, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
9.05, 38.7, 14.25, 20.14348033, 0.01, 1, 0.65).value,
1.16416037, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
28.717, 77.3, 14.25, 48.24116215, 0.001, 1, 0.65).value,
0.7493535, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
3.133, 101.7, 14.25, 85.80457401, 0.001, 1, 0.65).value,
0.77030774, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
9.05, 38.7, 14.25, 20.14348033, 0.001, 1, 0.65).value,
1.68654418, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
28.717, 77.3, 29, 48.24116215, 1, 1, 0.65).value,
0.31791278, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
3.133, 101.7, 29, 85.80457401, 1, 1, 0.65).value,
0.32486881, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
9.05, 38.7, 29, 20.14348033, 1, 1, 0.65).value,
0.72351614, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
28.717, 77.3, 29, 48.24116215, 0.1, 1, 0.65).value,
0.5132172, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
3.133, 101.7, 29, 85.80457401, 0.1, 1, 0.65).value,
0.52444655, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
9.05, 38.7, 29, 20.14348033, 0.1, 1, 0.65).value,
1.16799623, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
28.717, 77.3, 29, 48.24116215, 0.01, 1, 0.65).value,
0.76256679, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
3.133, 101.7, 29, 85.80457401, 0.01, 1, 0.65).value,
0.77925198, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
9.05, 38.7, 29, 20.14348033, 0.01, 1, 0.65).value,
1.73547406, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
28.717, 77.3, 29, 48.24116215, 0.001, 1, 0.65).value,
1.10474691, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
3.133, 101.7, 29, 85.80457401, 0.001, 1, 0.65).value,
1.12891911, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
9.05, 38.7, 29, 20.14348033, 0.001, 1, 0.65).value,
2.5142186, places=5)
def test_rain_cross_polarization_discrimination(self):
    """Validate ``models.itu618.rain_cross_polarization_discrimination``.

    Each tuple below holds the five positional arguments passed to the
    model followed by the reference value the result must match to five
    decimal places.  The data is identical to the original one-call-per-
    assertion version of this test; only the layout is table-driven.
    """
    cases = [
        (0.4891464, 14.25, 31.07694309, 1.0, 0, 49.57582307),
        (0.62159245, 14.25, 40.23202374, 1.0, 0, 49.3981550),
        (0.42101702, 14.25, 46.35969261, 1.0, 0, 53.93857057),
        (2.16093996, 14.25, 31.07694309, 0.1, 0, 40.29800396),
        (2.69015654, 14.25, 40.23202374, 0.1, 0, 40.28034662),
        (1.91338757, 14.25, 46.35969261, 0.1, 0, 44.68265675),
        (6.72784425, 14.25, 31.07694309, 0.01, 0, 32.97842659),
        (8.20500328, 14.25, 40.23202374, 0.01, 0, 33.13972017),
        (5.9418061, 14.25, 46.35969261, 0.01, 0, 37.62918682),
        (14.76177358, 14.25, 31.07694309, 0.001, 0, 28.14021762),
        (17.636376, 14.25, 40.23202374, 0.001, 0, 28.49940232),
        (12.98151687, 14.25, 46.35969261, 0.001, 0, 33.07510332),
        (2.17898357, 29.0, 31.07694309, 1.0, 0, 44.30006506),
        (2.81537632, 29.0, 40.23202374, 1.0, 0, 43.8603725),
        (1.96063611, 29.0, 46.35969261, 1.0, 0, 48.36964892),
        (8.46779316, 29.0, 31.07694309, 0.1, 0, 35.03444),
        (10.70289842, 29.0, 40.23202374, 0.1, 0, 34.76315732),
        (7.80832251, 29.0, 46.35969261, 0.1, 0, 39.12690283),
        (23.1908096, 29.0, 31.07694309, 0.01, 0, 27.96431726),
        (28.67449232, 29.0, 40.23202374, 0.01, 0, 27.8830305),
        (21.24861968, 29.0, 46.35969261, 0.01, 0, 32.34366876),
        (44.76009125, 29.0, 31.07694309, 0.001, 0, 23.64462724),
        (54.14015005, 29.0, 40.23202374, 0.001, 0, 23.7749224),
        (40.68133015, 29.0, 46.35969261, 0.001, 0, 28.33381119),
        (1.70690128, 14.25, 22.27833468, 1.0, 0, 38.65072987),
        (1.43904149, 14.25, 52.6789929, 1.0, 0, 46.23051298),
        (8.27164744, 14.25, 22.27833468, 0.1, 0, 27.9634536),
        (6.30417186, 14.25, 52.6789929, 0.1, 0, 36.82555192),
        (18.94410356, 14.25, 22.27833468, 0.01, 0, 22.64492814),
        (16.44617644, 14.25, 52.6789929, 0.01, 0, 30.86009092),
        (29.91171296, 14.25, 22.27833468, 0.001, 0, 20.29292318),
        (29.95767701, 14.25, 52.6789929, 0.001, 0, 27.62415271),
        (6.81336808, 29.0, 22.27833468, 1.0, 0, 33.64688473),
        (6.66385625, 29.0, 52.6789929, 1.0, 0, 40.0755612),
        (29.31896844, 29.0, 22.27833468, 0.1, 0, 22.85413903),
        (25.59455941, 29.0, 52.6789929, 0.1, 0, 30.6650529),
        (59.62576355, 29.0, 22.27833468, 0.01, 0, 17.88255372),
        (58.53988572, 29.0, 52.6789929, 0.01, 0, 25.03203051),
        (83.5996391, 29.0, 22.27833468, 0.001, 0, 16.16922861),
        (93.48939944, 29.0, 52.6789929, 0.001, 0, 22.41718851),
        (1.2731081, 14.25, 48.24116215, 1.0, 90, 45.80237934),
        (1.93713255, 14.25, 85.80457401, 1.0, 90, 75.12972446),
        (1.04440572, 14.25, 20.14348033, 1.0, 90, 42.28242577),
        (5.48101228, 14.25, 48.24116215, 0.1, 90, 36.51649699),
        (10.67987642, 14.25, 85.80457401, 0.1, 90, 65.51910372),
        (6.0510347, 14.25, 20.14348033, 0.1, 90, 30.32827626),
        (14.85903351, 14.25, 48.24116215, 0.01, 90, 30.19759496),
        (21.03740448, 14.25, 85.80457401, 0.01, 90, 63.60558975),
        (12.61120361, 14.25, 20.14348033, 0.01, 90, 25.96615447),
        (28.21372983, 14.25, 48.24116215, 0.001, 90, 26.54453432),
        (28.13337932, 14.25, 85.80457401, 0.001, 90, 64.9390721),
        (17.85045772, 14.25, 20.14348033, 0.001, 90, 24.79563761),
        (5.88085649, 29.0, 48.24116215, 1.0, 90, 39.73121592),
        (9.84052929, 29.0, 85.80457401, 1.0, 90, 68.04931733),
        (3.8213237, 29.0, 20.14348033, 1.0, 90, 38.25788658),
        (22.20219047, 29.0, 48.24116215, 0.1, 90, 30.45232663),
        (47.18910296, 29.0, 85.80457401, 0.1, 90, 58.32352317),
        (19.80717661, 29.0, 20.14348033, 0.1, 90, 26.0924586),
        (52.7819415, 29.0, 48.24116215, 0.01, 90, 24.4471052),
        (80.85074503, 29.0, 85.80457401, 0.01, 90, 56.92074963),
        (36.93157357, 29.0, 20.14348033, 0.01, 90, 22.11041426),
        (87.88505965, 29.0, 48.24116215, 0.001, 90, 21.39198393),
        (94.0437949, 29.0, 85.80457401, 0.001, 90, 59.09547625),
        (46.76694226, 29.0, 20.14348033, 0.001, 90, 21.61918999),
    ]
    for *model_args, expected in cases:
        # Same tolerance as the original assertions: 5 decimal places.
        self.assertAlmostEqual(
            models.itu618.rain_cross_polarization_discrimination(
                *model_args).value,
            expected, places=5)
def total_attenuation_fcn(self, lat, lon, f, el, p, D, eta, tau,
                          val_g, val_c, val_r, val_s, val_t):
    """Assert the attenuation contributions for one validation case.

    Computes the slant-path attenuation at (lat, lon) for frequency
    ``f``, elevation ``el`` and unavailability ``p``, and checks the
    gaseous, cloud, rain, scintillation and total contributions against
    the expected values ``val_g``, ``val_c``, ``val_r``, ``val_s`` and
    ``val_t`` (5 decimal places each).
    """
    # The validation data uses the exact method for the rainfall rate
    # exceeded during 0.01% of the time, hence the 0.01000000001 value.
    R001 = models.itu837.rainfall_rate(lat, lon, 0.01000000001)
    contributions = itur.atmospheric_attenuation_slant_path(
        lat, lon, f, el, p, D, eta=eta, tau=tau, R001=R001,
        return_contributions=True)
    expected = (val_g, val_c, val_r, val_s, val_t)
    # Order matches the function's return: A_g, A_c, A_r, A_s, A_total.
    for computed, target in zip(contributions, expected):
        self.assertAlmostEqual(computed.value, target, places=5)
def test_total_attenuation(self):
    """Validate the combined slant-path attenuation contributions.

    Each tuple holds the eight inputs forwarded to
    ``total_attenuation_fcn`` (lat, lon, f, el, p, D, eta, tau) followed
    by the five expected contribution values (gaseous, cloud, rain,
    scintillation, total).  The data is identical to the original
    one-call-per-case version of this test.
    """
    cases = [
        (51.5, -0.14, 14.25, 31.07694309, 1, 1, 0.65, 0,
         0.223693782, 0.45517046, 0.48914539, 0.26193234, 1.203663661),
        (41.9, 12.49, 14.25, 40.23202374, 1, 1, 0.65, 0,
         0.184499507, 0.26338517, 0.62159459, 0.22405226, 1.097400703),
        (33.94, 18.43, 14.25, 46.35969261, 1, 1, 0.65, 0,
         0.168635988, 0.18779409, 0.42101546, 0.23279942, 0.820437057),
        (51.5, -0.14, 14.25, 31.07694309, 0.1, 1, 0.65, 0,
         0.223693782, 0.45517046, 2.16093588, 0.4228461, 2.873752501),
        (41.9, 12.49, 14.25, 40.23202374, 0.1, 1, 0.65, 0,
         0.184499507, 0.26338517, 2.69016502, 0.36169504, 3.16011407),
        (33.94, 18.43, 14.25, 46.35969261, 0.1, 1, 0.65, 0,
         0.168635988, 0.18779409, 1.91338106, 0.37581586, 2.303155743),
        (51.5, -0.14, 14.25, 31.07694309, 0.01, 1, 0.65, 0,
         0.223693782, 0.45517046, 6.72783273, 0.62828836, 7.434122415),
        (41.9, 12.49, 14.25, 40.23202374, 0.01, 1, 0.65, 0,
         0.184499507, 0.26338517, 8.20502671, 0.5374267, 8.669947478),
        (33.94, 18.43, 14.25, 46.35969261, 0.01, 1, 0.65, 0,
         0.168635988, 0.18779409, 5.94178778, 0.55840821, 6.323600947),
        (51.5, -0.14, 14.25, 31.07694309, 0.001, 1, 0.65, 0,
         0.223693782, 0.45517046, 14.76175093, 0.91021486, 15.46781355),
        (41.9, 12.49, 14.25, 40.23202374, 0.001, 1, 0.65, 0,
         0.184499507, 0.26338517, 17.63642115, 0.77858162, 18.10123067),
        (33.94, 18.43, 14.25, 46.35969261, 0.001, 1, 0.65, 0,
         0.168635988, 0.18779409, 12.981481, 0.80897798, 13.36273512),
        (51.5, -0.14, 29, 31.07694309, 1, 1, 0.65, 0,
         0.799999368, 1.77247154, 2.17897957, 0.38849319, 4.770502219),
        (41.9, 12.49, 29, 40.23202374, 1, 1, 0.65, 0,
         0.673619867, 1.0256437, 2.81538514, 0.33115269, 4.528897381),
        (33.94, 18.43, 29, 46.35969261, 1, 1, 0.65, 0,
         0.62972417, 0.73128577, 1.96062953, 0.34339899, 3.343454224),
        (51.5, -0.14, 29, 31.07694309, 0.1, 1, 0.65, 0,
         0.799999368, 1.77247154, 8.46777895, 0.62715751, 11.05943682),
        (41.9, 12.49, 29, 40.23202374, 0.1, 1, 0.65, 0,
         0.673619867, 1.0256437, 10.70292908, 0.53459083, 12.41436971),
        (33.94, 18.43, 29, 46.35969261, 0.1, 1, 0.65, 0,
         0.62972417, 0.73128577, 7.80829852, 0.55436043, 9.18728313),
        (51.5, -0.14, 29, 31.07694309, 0.01, 1, 0.65, 0,
         0.799999368, 1.77247154, 23.19077435, 0.93186567, 25.78063225),
        (41.9, 12.49, 29, 40.23202374, 0.01, 1, 0.65, 0,
         0.673619867, 1.0256437, 28.67456675, 0.79432493, 30.38445044),
        (33.94, 18.43, 29, 46.35969261, 0.01, 1, 0.65, 0,
         0.62972417, 0.73128577, 21.24856054, 0.82369971, 22.62499923),
        (51.5, -0.14, 29, 31.07694309, 0.001, 1, 0.65, 0,
         0.799999368, 1.77247154, 44.76003026, 1.35001384, 47.35208054),
        (41.9, 12.49, 29, 40.23202374, 0.001, 1, 0.65, 0,
         0.673619867, 1.0256437, 54.14027603, 1.15075561, 55.85154062),
        (33.94, 18.43, 29, 46.35969261, 0.001, 1, 0.65, 0,
         0.62972417, 0.73128577, 40.68122866, 1.19331148, 42.05942781),
        (22.9, -43.23, 14.25, 22.27833468, 1, 1, 0.65, 0,
         0.383178724, 0.54183293, 1.70690691, 0.62009744, 2.715849229),
        (25.78, -80.22, 14.25, 52.6789929, 1, 1, 0.65, 0,
         0.206227197, 0.53317506, 1.43904233, 0.2664749, 2.196365451),
        (22.9, -43.23, 14.25, 22.27833468, 0.1, 1, 0.65, 0,
         0.383178724, 0.54183293, 8.27167236, 1.00104396, 9.253351467),
        (25.78, -80.22, 14.25, 52.6789929, 0.1, 1, 0.65, 0,
         0.206227197, 0.53317506, 6.30417519, 0.43017931, 7.057096675),
        (22.9, -43.23, 14.25, 22.27833468, 0.01, 1, 0.65, 0,
         0.383178724, 0.54183293, 18.94415527, 1.48740705, 19.92585295),
        (25.78, -80.22, 14.25, 52.6789929, 0.01, 1, 0.65, 0,
         0.206227197, 0.53317506, 16.44618432, 0.63918446, 17.1976133),
        (22.9, -43.23, 14.25, 22.27833468, 0.001, 1, 0.65, 0,
         0.383178724, 0.54183293, 29.91178614, 2.15483859, 30.91293869),
        (25.78, -80.22, 14.25, 52.6789929, 0.001, 1, 0.65, 0,
         0.206227197, 0.53317506, 29.95768987, 0.92600027, 30.71115009),
        (22.9, -43.23, 29, 22.27833468, 1, 1, 0.65, 0,
         1.504259763, 2.1099424, 6.81338837, 0.92341029, 10.4752418),
        (25.78, -80.22, 29, 52.6789929, 1, 1, 0.65, 0,
         0.827675954, 2.07622792, 6.66385994, 0.39237999, 9.576567189),
        (22.9, -43.23, 29, 22.27833468, 0.1, 1, 0.65, 0,
         1.504259763, 2.1099424, 29.31904828, 1.49069201, 32.9685827),
        (25.78, -80.22, 29, 52.6789929, 0.1, 1, 0.65, 0,
         0.827675954, 2.07622792, 25.59457239, 0.63343209, 28.50572549),
        (22.9, -43.23, 29, 22.27833468, 0.01, 1, 0.65, 0,
         1.504259763, 2.1099424, 59.62591067, 2.21495349, 63.27983401),
        (25.78, -80.22, 29, 52.6789929, 0.01, 1, 0.65, 0,
         0.827675954, 2.07622792, 58.53991262, 0.9411888, 61.45112298),
        (22.9, -43.23, 29, 22.27833468, 0.001, 1, 0.65, 0,
         1.504259763, 2.1099424, 83.59982398, 3.20885076, 87.2740725),
        (25.78, -80.22, 29, 52.6789929, 0.001, 1, 0.65, 0,
         0.827675954, 2.07622792, 93.48943794, 1.36352046, 96.4030686),
        (28.717, 77.3, 14.25, 48.24116215, 1, 1, 0.65, 90,
         0.257653026, 0.68592197, 1.27311232, 0.2156413, 2.228519972),
        (3.133, 101.7, 14.25, 85.80457401, 1, 1, 0.65, 90,
         0.163655312, 0.62211863, 1.93712821, 0.22167129, 2.732484342),
        (9.05, 38.7, 14.25, 20.14348033, 1, 1, 0.65, 90,
         0.22310495, 0.65764822, 1.04440674, 0.48533645, 1.993003982),
        (28.717, 77.3, 14.25, 48.24116215, 0.1, 1, 0.65, 90,
         0.257653026, 0.68592197, 5.48102886, 0.34811693, 6.434421439),
        (3.133, 101.7, 14.25, 85.80457401, 0.1, 1, 0.65, 90,
         0.163655312, 0.62211863, 10.67985456, 0.35785136, 11.47129236),
        (9.05, 38.7, 14.25, 20.14348033, 0.1, 1, 0.65, 90,
         0.22310495, 0.65764822, 6.05104013, 0.78349481, 6.977389778),
        (28.717, 77.3, 14.25, 48.24116215, 0.01, 1, 0.65, 90,
         0.257653026, 0.68592197, 14.85907425, 0.51725159, 15.81125251),
        (3.133, 101.7, 14.25, 85.80457401, 0.01, 1, 0.65, 90,
         0.163655312, 0.62211863, 21.03736546, 0.53171554, 21.82966493),
        (9.05, 38.7, 14.25, 20.14348033, 0.01, 1, 0.65, 90,
         0.22310495, 0.65764822, 12.61121387, 1.16416037, 13.54293868),
        (28.717, 77.3, 14.25, 48.24116215, 0.001, 1, 0.65, 90,
         0.257653026, 0.68592197, 28.21379917, 0.7493535, 29.16708769),
        (3.133, 101.7, 14.25, 85.80457401, 0.001, 1, 0.65, 90,
         0.163655312, 0.62211863, 28.13333254, 0.77030774, 28.92942223),
        (9.05, 38.7, 14.25, 20.14348033, 0.001, 1, 0.65, 90,
         0.22310495, 0.65764822, 17.85047073, 1.68654418, 18.80790784),
        (28.717, 77.3, 29, 48.24116215, 1, 1, 0.65, 90,
         1.038585522, 2.67103709, 5.88087518, 0.31791278, 9.596404871),
        (3.133, 101.7, 29, 85.80457401, 1, 1, 0.65, 90,
         0.645959831, 2.42258159, 9.84050759, 0.32486881, 12.9133514),
        (9.05, 38.7, 29, 20.14348033, 1, 1, 0.65, 90,
         0.703128217, 2.56093676, 3.82132703, 0.72351614, 7.126271267),
        (28.717, 77.3, 29, 48.24116215, 0.1, 1, 0.65, 90,
         1.038585522, 2.67103709, 22.20225497, 0.5132172, 25.9171717),
        (3.133, 101.7, 29, 85.80457401, 0.1, 1, 0.65, 90,
         0.645959831, 2.42258159, 47.18900784, 0.52444655, 50.26032116),
        (9.05, 38.7, 29, 20.14348033, 0.1, 1, 0.65, 90,
         0.703128217, 2.56093676, 19.80719237, 1.16799623, 23.10173121),
        (28.717, 77.3, 29, 48.24116215, 0.01, 1, 0.65, 90,
         1.038585522, 2.67103709, 52.78208044, 0.76256679, 56.49694605),
        (3.133, 101.7, 29, 85.80457401, 0.01, 1, 0.65, 90,
         0.645959831, 2.42258159, 80.85059735, 0.77925198, 83.92278473),
        (9.05, 38.7, 29, 20.14348033, 0.01, 1, 0.65, 90,
         0.703128217, 2.56093676, 36.9316002, 1.73547406, 40.23377893),
        (28.717, 77.3, 29, 48.24116215, 0.001, 1, 0.65, 90,
         1.038585522, 2.67103709, 87.88526702, 1.10474691, 91.6016281),
        (3.133, 101.7, 29, 85.80457401, 0.001, 1, 0.65, 90,
         0.645959831, 2.42258159, 94.04364093, 1.12891911, 97.11878785),
        (9.05, 38.7, 29, 20.14348033, 0.001, 1, 0.65, 90,
         0.703128217, 2.56093676, 46.76697248, 2.5142186, 50.09507012),
    ]
    for case in cases:
        self.total_attenuation_fcn(*case)
# class ITUR840_4TestCase(test.TestCase):
#
# def setUp(self):
# models.itu840.change_version(4)
#
# def test_columnar_content_reduced_liquid(self):
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 51.5, -0.14, 1.000).value,
# 1.26328612, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 41.9, 12.49, 1.000).value,
# 0.91467189, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 33.94, 18.43, 1.000).value,
# 0.73072098, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 51.5, -0.14, 0.100).value,
# 1.90329847, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 41.9, 12.49, 0.100).value,
# 1.49845951, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 33.94, 18.43, 0.100).value,
# 1.47628568, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 51.5, -0.14, 0.010).value,
# 1.90329847, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 41.9, 12.49, 0.010).value,
# 1.49845951, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 33.94, 18.43, 0.010).value,
# 1.47628568, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 51.5, -0.14, 0.001).value,
# 1.90329847, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 41.9, 12.49, 0.001).value,
# 1.49845951, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 33.94, 18.43, 0.001).value,
# 1.47628568, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 51.5, -0.14, 1.000).value,
# 1.26328612, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 41.9, 12.49, 1.000).value,
# 0.91467189, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 33.94, 18.43, 1.000).value,
# 0.73072098, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 51.5, -0.14, 0.100).value,
# 1.90329847, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 41.9, 12.49, 0.100).value,
# 1.49845951, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 33.94, 18.43, 0.100).value,
# 1.47628568, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 51.5, -0.14, 0.010).value,
# 1.90329847, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 41.9, 12.49, 0.010).value,
# 1.49845951, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 33.94, 18.43, 0.010).value,
# 1.47628568, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 51.5, -0.14, 0.001).value,
# 1.90329847, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 41.9, 12.49, 0.001).value,
# 1.49845951, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 33.94, 18.43, 0.001).value,
# 1.47628568, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 22.9, -43.23, 1.000).value,
# 1.10444871, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 25.78, -80.22, 1.000).value,
# 2.27978216, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 22.9, -43.23, 0.100).value,
# 2.82993169, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 25.78, -80.22, 0.100).value,
# 3.52927516, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 22.9, -43.23, 0.010).value,
# 2.82993169, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 25.78, -80.22, 0.010).value,
# 3.52927516, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 22.9, -43.23, 0.001).value,
# 2.82993169, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 25.78, -80.22, 0.001).value,
# 3.52927516, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 22.9, -43.23, 1.000).value,
# 1.10444871, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 25.78, -80.22, 1.000).value,
# 2.27978216, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 22.9, -43.23, 0.100).value,
# 2.82993169, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 25.78, -80.22, 0.100).value,
# 3.52927516, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 22.9, -43.23, 0.010).value,
# 2.82993169, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 25.78, -80.22, 0.010).value,
# 3.52927516, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 22.9, -43.23, 0.001).value,
# 2.82993169, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 25.78, -80.22, 0.001).value,
# 3.52927516, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 28.717, 77.3, 1.000).value,
# 2.75109958, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 3.133, 101.7, 1.000).value,
# 3.33600769, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 9.05, 38.7, 1.000).value,
# 1.21770185, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 28.717, 77.3, 0.100).value,
# 4.23072604, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 3.133, 101.7, 0.100).value,
# 3.80525123, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 9.05, 38.7, 0.100).value,
# 1.49251459, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 28.717, 77.3, 0.010).value,
# 4.23072604, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 3.133, 101.7, 0.010).value,
# 3.80525123, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 9.05, 38.7, 0.010).value,
# 1.49251459, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 28.717, 77.3, 0.001).value,
# 4.23072604, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 3.133, 101.7, 0.001).value,
# 3.80525123, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 9.05, 38.7, 0.001).value,
# 1.49251459, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 28.717, 77.3, 1.000).value,
# 2.75109958, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 3.133, 101.7, 1.000).value,
# 3.33600769, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 9.05, 38.7, 1.000).value,
# 1.21770185, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 28.717, 77.3, 0.100).value,
# 4.23072604, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 3.133, 101.7, 0.100).value,
# 3.80525123, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 9.05, 38.7, 0.100).value,
# 1.49251459, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 28.717, 77.3, 0.010).value,
# 4.23072604, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 3.133, 101.7, 0.010).value,
# 3.80525123, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 9.05, 38.7, 0.010).value,
# 1.49251459, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 28.717, 77.3, 0.001).value,
# 4.23072604, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 3.133, 101.7, 0.001).value,
# 3.80525123, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 9.05, 38.7, 0.001).value,
# 1.49251459, places=5)
#
# def test_cloud_attenuation(self):
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 51.5, -0.14, 30.87067768, 14.25, 1.000).value,
# 0.45792895, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 41.9, 12.49, 40.97052773, 14.25, 1.000).value,
# 0.25946553, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 33.94, 18.43, 47.91280491, 14.25, 1.000).value,
# 0.18313623, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 51.5, -0.14, 30.87067768, 14.25, 0.100).value,
# 0.68992722, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 41.9, 12.49, 40.97052773, 14.25, 0.100).value,
# 0.42506892, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 33.94, 18.43, 47.91280491, 14.25, 0.100).value,
# 0.36999265, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 51.5, -0.14, 30.87067768, 14.25, 0.010).value,
# 0.68992722, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 41.9, 12.49, 40.97052773, 14.25, 0.010).value,
# 0.42506892, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 33.94, 18.43, 47.91280491, 14.25, 0.010).value,
# 0.36999265, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 51.5, -0.14, 30.87067768, 14.25, 0.001).value,
# 0.68992722, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 41.9, 12.49, 40.97052773, 14.25, 0.001).value,
# 0.42506892, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 33.94, 18.43, 47.91280491, 14.25, 0.001).value,
# 0.36999265, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 51.5, -0.14, 30.87067768, 29, 1.000).value,
# 1.79599547, places=2)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 41.9, 12.49, 40.97052773, 29, 1.000).value,
# 1.01762274, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 33.94, 18.43, 47.91280491, 29, 1.000).value,
# 0.71825953, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 51.5, -0.14, 30.87067768, 29, 0.100).value,
# 2.70589171, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 41.9, 12.49, 40.97052773, 29, 0.100).value,
# 1.66711854, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 33.94, 18.43, 47.91280491, 29, 0.100).value,
# 1.45110964, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 51.5, -0.14, 30.87067768, 29, 0.010).value,
# 2.70589171, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 41.9, 12.49, 40.97052773, 29, 0.010).value,
# 1.66711854, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 33.94, 18.43, 47.91280491, 29, 0.010).value,
# 1.45110964, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 51.5, -0.14, 30.87067768, 29, 0.001).value,
# 2.70589171, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 41.9, 12.49, 40.97052773, 29, 0.001).value,
# 1.66711854, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 33.94, 18.43, 47.91280491, 29, 0.001).value,
# 1.45110964, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 22.9, -43.23, 59.81487174, 14.25, 1.000).value,
# 0.23764476, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 25.78, -80.22, 49.20900369, 14.25, 1.000).value,
# 0.56006901, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 22.9, -43.23, 59.81487174, 14.25, 0.100).value,
# 0.60891776, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 25.78, -80.22, 49.20900369, 14.25, 0.100).value,
# 0.86702917, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 22.9, -43.23, 59.81487174, 14.25, 0.010).value,
# 0.60891776, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 25.78, -80.22, 49.20900369, 14.25, 0.010).value,
# 0.86702917, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 22.9, -43.23, 59.81487174, 14.25, 0.001).value,
# 0.60891776, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 25.78, -80.22, 49.20900369, 14.25, 0.001).value,
# 0.86702917, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 22.9, -43.23, 59.81487174, 29, 1.000).value,
# 0.93204177, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 25.78, -80.22, 49.20900369, 29, 1.000).value,
# 2.19658834, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 22.9, -43.23, 59.81487174, 29, 0.100).value,
# 2.38817297, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 25.78, -80.22, 49.20900369, 29, 0.100).value,
# 3.40048483, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 22.9, -43.23, 59.81487174, 29, 0.010).value,
# 2.38817297, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 25.78, -80.22, 49.20900369, 29, 0.010).value,
# 3.40048483, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 22.9, -43.23, 59.81487174, 29, 0.001).value,
# 2.38817297, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 25.78, -80.22, 49.20900369, 29, 0.001).value,
# 3.40048483, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 28.717, 77.3, 55.90591362, 14.25, 1.000).value,
# 0.6178942, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 3.133, 101.7, 67.76751981, 14.25, 1.000).value,
# 0.67031269, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 9.05, 38.7, 38.14104832, 14.25, 1.000).value,
# 0.36671963, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 28.717, 77.3, 55.90591362, 14.25, 0.100).value,
# 0.95021681, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 3.133, 101.7, 67.76751981, 14.25, 0.100).value,
# 0.76459901, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 9.05, 38.7, 38.14104832, 14.25, 0.100).value,
# 0.44948146, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 28.717, 77.3, 55.90591362, 14.25, 0.010).value,
# 0.95021681, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 3.133, 101.7, 67.76751981, 14.25, 0.010).value,
# 0.76459901, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 9.05, 38.7, 38.14104832, 14.25, 0.010).value,
# 0.44948146, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 28.717, 77.3, 55.90591362, 14.25, 0.001).value,
# 0.95021681, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 3.133, 101.7, 67.76751981, 14.25, 0.001).value,
# 0.76459901, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 9.05, 38.7, 38.14104832, 14.25, 0.001).value,
# 0.44948146, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 28.717, 77.3, 55.90591362, 29, 1.000).value,
# 2.4233785, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 3.133, 101.7, 67.76751981, 29, 1.000).value,
# 2.6289636, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 9.05, 38.7, 38.14104832, 29, 1.000).value,
# 1.43827289, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 28.717, 77.3, 55.90591362, 29, 0.100).value,
# 3.72674641, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 3.133, 101.7, 67.76751981, 29, 0.100).value,
# 2.99875418, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 9.05, 38.7, 38.14104832, 29, 0.100).value,
# 1.76286444, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 28.717, 77.3, 55.90591362, 29, 0.010).value,
# 3.72674641, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 3.133, 101.7, 67.76751981, 29, 0.010).value,
# 2.99875418, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 9.05, 38.7, 38.14104832, 29, 0.010).value,
# 1.76286444, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 28.717, 77.3, 55.90591362, 29, 0.001).value,
# 3.72674641, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 3.133, 101.7, 67.76751981, 29, 0.001).value,
# 2.99875418, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 9.05, 38.7, 38.14104832, 29, 0.001).value,
# 1.76286444, places=3)
class ITUR840_7TestCase(test.TestCase):
    """Validation cases for version 7 of the ITU-R P.840 model.

    The reference values are tabulated as (inputs..., expected) tuples and
    iterated, instead of one hand-written assert per combination; the
    assertions, their order, and their tolerances are unchanged.
    """
    def setUp(self):
        # Pin the recommendation version under test.
        models.itu840.change_version(7)
    def test_columnar_content_reduced_liquid(self):
        # Each case: (lat, lon, p, expected) — p is the annual probability
        # argument; expected is the reference .value checked to 5 places.
        reference_cases = [
            (3.133, 101.7, 0.1, 3.805251208),
            (3.133, 101.7, 0.15, 3.744512329),
            (3.133, 101.7, 0.3, 3.630957766),
            (3.133, 101.7, 0.35, 3.594946111),
            (22.9, -43.23, 0.1, 2.829931669),
            (22.9, -43.23, 0.15, 2.615428331),
            (22.9, -43.23, 0.3, 2.152560931),
            (22.9, -43.23, 0.35, 2.030424796),
            (23, 30, 0.1, 0.443821013),
            (23, 30, 0.15, 0.367758574),
            (23, 30, 0.3, 0.25249597),
            (23, 30, 0.35, 0.230476914),
            (25.78, -80.22, 0.1, 3.52927514),
            (25.78, -80.22, 0.15, 3.368053109),
            (25.78, -80.22, 0.3, 3.090031167),
            (25.78, -80.22, 0.35, 2.98280226),
            (28.717, 77.3, 0.1, 4.230726014),
            (28.717, 77.3, 0.15, 4.004951665),
            (28.717, 77.3, 0.3, 3.641943304),
            (28.717, 77.3, 0.35, 3.550068054),
            (33.94, 18.43, 0.1, 1.476285677),
            (33.94, 18.43, 0.15, 1.342662497),
            (33.94, 18.43, 0.3, 1.117630129),
            (33.94, 18.43, 0.35, 1.061278891),
            (41.9, 12.49, 0.1, 1.498459518),
            (41.9, 12.49, 0.15, 1.411411719),
            (41.9, 12.49, 0.3, 1.254176128),
            (41.9, 12.49, 0.35, 1.214239524),
            (51.5, -0.14, 0.1, 1.903298487),
            (51.5, -0.14, 0.15, 1.803803604),
            (51.5, -0.14, 0.3, 1.641289077),
        ]
        for lat, lon, p, expected in reference_cases:
            self.assertAlmostEqual(
                models.itu840.columnar_content_reduced_liquid(
                    lat, lon, p).value,
                expected, places=5)
    def test_cloud_attenuation(self):
        # Each case: (lat, lon, el, f, p, expected) — matching the positional
        # arguments of cloud_attenuation; expected checked to 5 places.
        reference_cases = [
            (51.5, -0.14, 31.07694309, 14.25, 1.0, 0.45517046),
            (41.9, 12.49, 40.23202374, 14.25, 1.0, 0.26338517),
            (33.94, 18.43, 46.35969261, 14.25, 1.0, 0.18779409),
            (51.5, -0.14, 31.07694309, 14.25, 0.5, 0.53457216),
            (41.9, 12.49, 40.23202374, 14.25, 0.5, 0.3230387),
            (33.94, 18.43, 46.35969261, 14.25, 0.5, 0.23923797),
            (51.5, -0.14, 31.07694309, 14.25, 0.3, 0.59136745),
            (41.9, 12.49, 40.23202374, 14.25, 0.3, 0.36114741),
            (33.94, 18.43, 46.35969261, 14.25, 0.3, 0.2872291),
            (51.5, -0.14, 31.07694309, 14.25, 0.2, 0.62448748),
            (41.9, 12.49, 40.23202374, 14.25, 0.2, 0.38863977),
            (33.94, 18.43, 46.35969261, 14.25, 0.2, 0.32069677),
            (51.5, -0.14, 31.07694309, 29, 1.0, 1.77247154),
            (41.9, 12.49, 40.23202374, 29, 1.0, 1.0256437),
            (33.94, 18.43, 46.35969261, 29, 1.0, 0.73128577),
            (51.5, -0.14, 31.07694309, 29, 0.5, 2.08166837),
            (41.9, 12.49, 40.23202374, 29, 0.5, 1.2579395),
            (33.94, 18.43, 46.35969261, 29, 0.5, 0.9316125),
            (51.5, -0.14, 31.07694309, 29, 0.3, 2.30283391),
            (41.9, 12.49, 40.23202374, 29, 0.3, 1.40633801),
            (33.94, 18.43, 46.35969261, 29, 0.3, 1.11849396),
            (51.5, -0.14, 31.07694309, 29, 0.2, 2.43180607),
            (41.9, 12.49, 40.23202374, 29, 0.2, 1.51339553),
            (33.94, 18.43, 46.35969261, 29, 0.2, 1.24881983),
            (22.9, -43.23, 22.27833468, 14.25, 1.0, 0.54183293),
            (25.78, -80.22, 52.6789929, 14.25, 1.0, 0.53317506),
            (22.9, -43.23, 22.27833468, 14.25, 0.5, 0.85746792),
            (25.78, -80.22, 52.6789929, 14.25, 0.5, 0.63956606),
            (22.9, -43.23, 22.27833468, 14.25, 0.3, 1.05602769),
            (25.78, -80.22, 52.6789929, 14.25, 0.3, 0.72266885),
            (22.9, -43.23, 22.27833468, 14.25, 0.2, 1.20844208),
            (25.78, -80.22, 52.6789929, 14.25, 0.2, 0.76093789),
            (22.9, -43.23, 22.27833468, 29, 1.0, 2.1099424),
            (25.78, -80.22, 52.6789929, 29, 1.0, 2.07622792),
            (22.9, -43.23, 22.27833468, 29, 0.5, 3.33905126),
            (25.78, -80.22, 52.6789929, 29, 0.5, 2.49052334),
            (22.9, -43.23, 22.27833468, 29, 0.3, 4.11225948),
            (25.78, -80.22, 52.6789929, 29, 0.3, 2.81413248),
            (22.9, -43.23, 22.27833468, 29, 0.2, 4.70577375),
            (25.78, -80.22, 52.6789929, 29, 0.2, 2.96315532),
            (28.72, 77.3, 48.24116215, 14.25, 1.0, 0.68560078),
            (3.13, 101.7, 85.80457401, 14.25, 1.0, 0.62214817),
            (9.05, 38.7, 20.14348033, 14.25, 1.0, 0.65764822),
            (28.72, 77.3, 48.24116215, 14.25, 0.5, 0.83179446),
            (3.13, 101.7, 85.80457401, 14.25, 0.5, 0.65489922),
            (9.05, 38.7, 20.14348033, 14.25, 0.5, 0.7181604),
            (28.72, 77.3, 48.24116215, 14.25, 0.3, 0.90773089),
            (3.13, 101.7, 85.80457401, 14.25, 0.3, 0.6771593),
            (9.05, 38.7, 20.14348033, 14.25, 0.3, 0.75244454),
            (28.72, 77.3, 48.24116215, 14.25, 0.2, 0.95830261),
            (3.13, 101.7, 85.80457401, 14.25, 0.2, 0.69030616),
            (9.05, 38.7, 20.14348033, 14.25, 0.2, 0.77111549),
            (28.72, 77.3, 48.24116215, 29, 1.0, 2.66978635),
            (3.13, 101.7, 85.80457401, 29, 1.0, 2.42269662),
            (9.05, 38.7, 20.14348033, 29, 1.0, 2.56093676),
            (28.72, 77.3, 48.24116215, 29, 0.5, 3.23907665),
            (3.13, 101.7, 85.80457401, 29, 0.5, 2.55023192),
            (9.05, 38.7, 20.14348033, 29, 0.5, 2.79657622),
            (28.72, 77.3, 48.24116215, 29, 0.3, 3.53477943),
            (3.13, 101.7, 85.80457401, 29, 0.3, 2.63691452),
            (9.05, 38.7, 20.14348033, 29, 0.3, 2.93008149),
            (28.72, 77.3, 48.24116215, 29, 0.2, 3.73170991),
            (3.13, 101.7, 85.80457401, 29, 0.2, 2.68810948),
            (9.05, 38.7, 20.14348033, 29, 0.2, 3.00278773),
        ]
        for lat, lon, el, f, p, expected in reference_cases:
            self.assertAlmostEqual(
                models.itu840.cloud_attenuation(lat, lon, el, f, p).value,
                expected, places=5)
class ITUR1511_1TestCase(test.TestCase):
    """Validation cases for version 1 of the ITU-R P.1511 model."""
    def setUp(self):
        # Pin the recommendation version under test.
        models.itu1511.change_version(1)
    def test_topographic_altitude(self):
        # (lat, lon, expected altitude value), checked to 5 decimal places.
        reference_cases = [
            (3.133, 101.7, 0.23610446),
            (22.9, -43.23, 0.0),
            (23.0, 30.0, 0.247),
            (25.78, -80.22, 7.511e-05),
            (28.717, 77.3, 0.21755946),
            (33.94, 18.43, 0.0),
            (41.9, 12.49, 0.05670104),
            (51.5, -0.14, 0.06916422),
        ]
        for lat, lon, expected in reference_cases:
            self.assertAlmostEqual(
                models.itu1511.topographic_altitude(lat, lon).value,
                expected, places=5)
class ITUR1511_2TestCase(test.TestCase):
    """Validation cases for version 2 of the ITU-R P.1511 model."""
    def setUp(self):
        # Pin the recommendation version under test.
        models.itu1511.change_version(2)
    def test_topographic_altitude(self):
        # (lat, lon, expected altitude value, decimal places) — the original
        # mixed 4- and 5-place tolerances, so the tolerance travels with each
        # case.
        reference_cases = [
            (51.5, -0.14, 0.031382983999999, 4),
            (41.9, 12.49, 0.0461229880100015, 4),
            (33.94, 18.43, 0, 5),
            (22.9, -43.23, 0, 5),
            (25.78, -80.22, 0.00861727999508758, 4),
            (28.717, 77.3, 0.209383698952704, 4),
            (3.133, 101.7, 0.0512514559528945, 4),
            (9.05, 38.7, 2.5398618775, 4),
        ]
        for lat, lon, expected, tol_places in reference_cases:
            self.assertAlmostEqual(
                models.itu1511.topographic_altitude(lat, lon).value,
                expected, places=tol_places)
if __name__ == '__main__':
    # Build the full validation suite (avoid rebinding the `suite` factory
    # name, as the original did) and run it verbosely.
    full_suite = suite()
    print('Validation tests for the ITU-R models')
    print('------------------------')
    print(
        'A total of %d test-cases are going to be tested' %
        full_suite.countTestCases())
    sys.stdout.flush()  # make the banner appear before the runner's output
    test.TextTestRunner(verbosity=2).run(full_suite)
| 41.170189
| 79
| 0.571003
| 39,873
| 325,368
| 4.576982
| 0.057407
| 0.212419
| 0.262902
| 0.262512
| 0.906815
| 0.89384
| 0.877248
| 0.869527
| 0.679432
| 0.599979
| 0
| 0.29676
| 0.307387
| 325,368
| 7,902
| 80
| 41.175399
| 0.513068
| 0.11648
| 0
| 0.552644
| 0
| 0
| 0.003047
| 0.00216
| 0
| 0
| 0
| 0
| 0.263945
| 1
| 0.007394
| false
| 0
| 0.000804
| 0
| 0.010609
| 0.000482
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4ad0ad913286795ca9bacb86a6daf83bb7f98c19
| 157
|
py
|
Python
|
example_project/app/utils.py
|
ivan-sysoi/django-photoslib
|
ffab2a7c238bcfec709a2db31fdd3b40757cf730
|
[
"MIT"
] | null | null | null |
example_project/app/utils.py
|
ivan-sysoi/django-photoslib
|
ffab2a7c238bcfec709a2db31fdd3b40757cf730
|
[
"MIT"
] | 11
|
2020-04-05T17:46:46.000Z
|
2022-02-12T05:11:38.000Z
|
example_project/app/utils.py
|
ivan-sysoi/django-photoslib
|
ffab2a7c238bcfec709a2db31fdd3b40757cf730
|
[
"MIT"
] | null | null | null |
from photoslib.utils import default_photo_serializer
def custom_photo_serializer(photo, request=None):
    """Serialize *photo* by delegating to the library's default serializer.

    Kept as a named hook so the serialization can later diverge from the
    default without touching call sites.
    """
    serialized = default_photo_serializer(photo, request)
    return serialized
| 26.166667
| 52
| 0.840764
| 20
| 157
| 6.3
| 0.6
| 0.357143
| 0.349206
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101911
| 157
| 5
| 53
| 31.4
| 0.893617
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 9
|
4ae9d3a465ae586119eb5d21595e1678bd2e1bbb
| 3,396
|
py
|
Python
|
tests/core/utilities/test_prepare_transaction_replacement.py
|
iamdefinitelyahuman/web3.py
|
7cc996723841895b9cc4feac354bc06d711dee05
|
[
"MIT"
] | 2
|
2020-11-10T10:52:50.000Z
|
2021-06-08T02:34:32.000Z
|
tests/core/utilities/test_prepare_transaction_replacement.py
|
iamdefinitelyahuman/web3.py
|
7cc996723841895b9cc4feac354bc06d711dee05
|
[
"MIT"
] | null | null | null |
tests/core/utilities/test_prepare_transaction_replacement.py
|
iamdefinitelyahuman/web3.py
|
7cc996723841895b9cc4feac354bc06d711dee05
|
[
"MIT"
] | 2
|
2021-01-20T10:57:34.000Z
|
2021-11-02T01:10:48.000Z
|
import pytest
from web3._utils.transactions import (
prepare_replacement_transaction,
)
# Shared fixture: a pending (not-yet-mined) transaction — 'blockHash' of None
# is what marks it as pending for prepare_replacement_transaction.  The tests
# below rely on nonce=2 and gasPrice=10 when asserting replacement values.
SIMPLE_CURRENT_TRANSACTION = {
    'blockHash': None,
    'hash': '0x0',
    'nonce': 2,
    'gasPrice': 10,
}
def test_prepare_transaction_replacement(web3):
    """Replacement with an explicit matching nonce gets gasPrice 12 (from 10)."""
    replacement = prepare_replacement_transaction(
        web3, SIMPLE_CURRENT_TRANSACTION, {'value': 1, 'nonce': 2})
    expected = {'value': 1, 'nonce': 2, 'gasPrice': 12}
    assert replacement == expected
def test_prepare_transaction_replacement_without_nonce_sets_correct_nonce(web3):
    """Omitting the nonce inherits it (2) from the current transaction."""
    replacement = prepare_replacement_transaction(
        web3, SIMPLE_CURRENT_TRANSACTION, {'value': 1})
    expected = {'value': 1, 'nonce': 2, 'gasPrice': 12}
    assert replacement == expected
def test_prepare_transaction_replacement_already_mined_raises(web3):
    """A transaction that already has a blockHash cannot be replaced."""
    mined_transaction = {'blockHash': '0xa1a1a1', 'hash': '0x0'}
    with pytest.raises(ValueError):
        prepare_replacement_transaction(web3, mined_transaction, {'value': 2})
def test_prepare_transaction_replacement_nonce_mismatch_raises(web3):
    """A replacement whose nonce differs from the current one is rejected."""
    current = {'blockHash': None, 'hash': '0x0', 'nonce': 1}
    with pytest.raises(ValueError):
        prepare_replacement_transaction(web3, current, {'nonce': 2})
def test_prepare_transaction_replacement_not_higher_gas_price_raises(web3):
    """A gas price at or below the current one (10) must be rejected."""
    new_transaction = {'value': 1, 'gasPrice': 5}
    with pytest.raises(ValueError):
        prepare_replacement_transaction(
            web3, SIMPLE_CURRENT_TRANSACTION, new_transaction)
    # Also raises when equal to the current transaction
    new_transaction['gasPrice'] = 10
    with pytest.raises(ValueError):
        prepare_replacement_transaction(
            web3, SIMPLE_CURRENT_TRANSACTION, new_transaction)
def test_prepare_transaction_replacement_gas_price_defaulting(web3):
    """With no gasPrice given, the replacement defaults to 12 (from 10)."""
    replacement = prepare_replacement_transaction(
        web3, SIMPLE_CURRENT_TRANSACTION, {'value': 2})
    assert replacement['gasPrice'] == 12
def test_prepare_transaction_replacement_gas_price_defaulting_when_strategy_higer(web3):
    """A strategy returning more (20) than the bumped default wins.

    NOTE(review): the 'higer' typo in the name is kept deliberately — it is
    the test's public identifier.
    """
    web3.eth.setGasPriceStrategy(lambda w3, txn: 20)
    replacement = prepare_replacement_transaction(
        web3, SIMPLE_CURRENT_TRANSACTION, {'value': 2})
    assert replacement['gasPrice'] == 20
def test_prepare_transaction_replacement_gas_price_defaulting_when_strategy_lower(web3):
    """A strategy returning less (5) than the bumped default (12) is ignored."""
    web3.eth.setGasPriceStrategy(lambda w3, txn: 5)
    replacement = prepare_replacement_transaction(
        web3, SIMPLE_CURRENT_TRANSACTION, {'value': 2})
    assert replacement['gasPrice'] == 12
| 27.609756
| 88
| 0.707892
| 339
| 3,396
| 6.681416
| 0.162242
| 0.166887
| 0.129801
| 0.197792
| 0.829139
| 0.792936
| 0.734658
| 0.734658
| 0.710817
| 0.709051
| 0
| 0.024263
| 0.211131
| 3,396
| 122
| 89
| 27.836066
| 0.821202
| 0.014429
| 0
| 0.577778
| 0
| 0
| 0.058296
| 0
| 0
| 0
| 0.005082
| 0
| 0.055556
| 1
| 0.111111
| false
| 0
| 0.022222
| 0.022222
| 0.155556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ab20f2813b1f0b053e84e17ca7d3591cc2f3398e
| 1,535
|
py
|
Python
|
iotorch/utils/iotorchutils.py
|
juanmagal/iot-slice-orchestrator
|
9fc0df4f74b2788ea116549c001bcb8c6b663280
|
[
"Apache-2.0"
] | 2
|
2019-08-16T13:05:39.000Z
|
2019-12-24T16:57:29.000Z
|
iotorch/utils/iotorchutils.py
|
juanmagal/iot-slice-orchestrator
|
9fc0df4f74b2788ea116549c001bcb8c6b663280
|
[
"Apache-2.0"
] | null | null | null |
iotorch/utils/iotorchutils.py
|
juanmagal/iot-slice-orchestrator
|
9fc0df4f74b2788ea116549c001bcb8c6b663280
|
[
"Apache-2.0"
] | null | null | null |
import toml
# Keys used in the TOML configuration file.
K8S_CLUSTERS_KEY = 'k8sclusters'
K8S_CLUSTER_CONTEXT_ATTR = 'k8scontext'
K8S_CLUSTER_IP_ATTR = 'ip'
K8S_CLUSTER_HELM_PORT_ATTR = 'helmport'
K8S_CLUSTER_HELM_IP_ATTR = 'helmip'


def _getk8sclusterattr(cluster, config_path, attr):
    """Return attribute *attr* of *cluster* from the TOML config at *config_path*.

    Shared implementation for the four public getters below.  Returns None
    when the clusters table, the named cluster, or the attribute is absent.
    (The original four copies each contained a dead ``f.close`` statement —
    missing call parentheses, and redundant inside ``with`` — now removed.)
    """
    # The with-block closes the file; no explicit close needed.
    with open(config_path) as f:
        config = toml.load(f)
    clusters = config.get(K8S_CLUSTERS_KEY)
    if clusters is None:
        return None
    entry = clusters.get(cluster)
    if entry is None:
        return None
    return entry.get(attr)


def getk8sclustercontext(cluster, config_path):
    """Return the configured k8s context for *cluster*, or None if unknown."""
    return _getk8sclusterattr(cluster, config_path, K8S_CLUSTER_CONTEXT_ATTR)


def getk8sclusterip(cluster, config_path):
    """Return the configured IP for *cluster*, or None if unknown."""
    return _getk8sclusterattr(cluster, config_path, K8S_CLUSTER_IP_ATTR)


def getk8sclusterhelmport(cluster, config_path):
    """Return the configured helm port for *cluster*, or None if unknown."""
    return _getk8sclusterattr(cluster, config_path, K8S_CLUSTER_HELM_PORT_ATTR)


def getk8sclusterhelmip(cluster, config_path):
    """Return the configured helm IP for *cluster*, or None if unknown."""
    return _getk8sclusterattr(cluster, config_path, K8S_CLUSTER_HELM_IP_ATTR)
| 20.743243
| 49
| 0.69316
| 206
| 1,535
| 4.941748
| 0.15534
| 0.117878
| 0.11002
| 0.082515
| 0.801572
| 0.746562
| 0.746562
| 0.746562
| 0.746562
| 0.746562
| 0
| 0.0159
| 0.221498
| 1,535
| 73
| 50
| 21.027397
| 0.835983
| 0
| 0
| 0.72
| 0
| 0
| 0.024104
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0.02
| 0
| 0.34
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
db714d8ecd47117a9aea7e87cc3355f529e0fc5c
| 23,938
|
py
|
Python
|
sdk/python/pulumi_aws/sagemaker/user_profile.py
|
aamir-locus/pulumi-aws
|
3e234b050129bde35d8e072a88bd608562f02142
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/sagemaker/user_profile.py
|
aamir-locus/pulumi-aws
|
3e234b050129bde35d8e072a88bd608562f02142
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/sagemaker/user_profile.py
|
aamir-locus/pulumi-aws
|
3e234b050129bde35d8e072a88bd608562f02142
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['UserProfileArgs', 'UserProfile']
@pulumi.input_type
class UserProfileArgs:
    """Input-argument container for constructing a `UserProfile` resource.

    NOTE: this file is tool-generated (see header warning); only comments and
    docstrings are added here, the code is untouched.  `domain_id` and
    `user_profile_name` are required; all other fields are optional.
    """
    def __init__(__self__, *,
                 domain_id: pulumi.Input[str],
                 user_profile_name: pulumi.Input[str],
                 single_sign_on_user_identifier: Optional[pulumi.Input[str]] = None,
                 single_sign_on_user_value: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 user_settings: Optional[pulumi.Input['UserProfileUserSettingsArgs']] = None):
        """
        The set of arguments for constructing a UserProfile resource.
        :param pulumi.Input[str] domain_id: The ID of the associated Domain.
        :param pulumi.Input[str] user_profile_name: The name for the User Profile.
        :param pulumi.Input[str] single_sign_on_user_identifier: A specifier for the type of value specified in `single_sign_on_user_value`. Currently, the only supported value is `UserName`. If the Domain's AuthMode is SSO, this field is required. If the Domain's AuthMode is not SSO, this field cannot be specified.
        :param pulumi.Input[str] single_sign_on_user_value: The username of the associated AWS Single Sign-On User for this User Profile. If the Domain's AuthMode is SSO, this field is required, and must match a valid username of a user in your directory. If the Domain's AuthMode is not SSO, this field cannot be specified.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource.
        :param pulumi.Input['UserProfileUserSettingsArgs'] user_settings: The user settings. See User Settings below.
        """
        pulumi.set(__self__, "domain_id", domain_id)
        pulumi.set(__self__, "user_profile_name", user_profile_name)
        # Optional fields are only recorded when explicitly provided.
        if single_sign_on_user_identifier is not None:
            pulumi.set(__self__, "single_sign_on_user_identifier", single_sign_on_user_identifier)
        if single_sign_on_user_value is not None:
            pulumi.set(__self__, "single_sign_on_user_value", single_sign_on_user_value)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if user_settings is not None:
            pulumi.set(__self__, "user_settings", user_settings)
    @property
    @pulumi.getter(name="domainId")
    def domain_id(self) -> pulumi.Input[str]:
        """
        The ID of the associated Domain.
        """
        return pulumi.get(self, "domain_id")
    @domain_id.setter
    def domain_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "domain_id", value)
    @property
    @pulumi.getter(name="userProfileName")
    def user_profile_name(self) -> pulumi.Input[str]:
        """
        The name for the User Profile.
        """
        return pulumi.get(self, "user_profile_name")
    @user_profile_name.setter
    def user_profile_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "user_profile_name", value)
    @property
    @pulumi.getter(name="singleSignOnUserIdentifier")
    def single_sign_on_user_identifier(self) -> Optional[pulumi.Input[str]]:
        """
        A specifier for the type of value specified in `single_sign_on_user_value`. Currently, the only supported value is `UserName`. If the Domain's AuthMode is SSO, this field is required. If the Domain's AuthMode is not SSO, this field cannot be specified.
        """
        return pulumi.get(self, "single_sign_on_user_identifier")
    @single_sign_on_user_identifier.setter
    def single_sign_on_user_identifier(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "single_sign_on_user_identifier", value)
    @property
    @pulumi.getter(name="singleSignOnUserValue")
    def single_sign_on_user_value(self) -> Optional[pulumi.Input[str]]:
        """
        The username of the associated AWS Single Sign-On User for this User Profile. If the Domain's AuthMode is SSO, this field is required, and must match a valid username of a user in your directory. If the Domain's AuthMode is not SSO, this field cannot be specified.
        """
        return pulumi.get(self, "single_sign_on_user_value")
    @single_sign_on_user_value.setter
    def single_sign_on_user_value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "single_sign_on_user_value", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of tags to assign to the resource.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter(name="userSettings")
    def user_settings(self) -> Optional[pulumi.Input['UserProfileUserSettingsArgs']]:
        """
        The user settings. See User Settings below.
        """
        return pulumi.get(self, "user_settings")
    @user_settings.setter
    def user_settings(self, value: Optional[pulumi.Input['UserProfileUserSettingsArgs']]):
        pulumi.set(self, "user_settings", value)
@pulumi.input_type
class _UserProfileState:
    def __init__(__self__, *,
                 arn: Optional[pulumi.Input[str]] = None,
                 domain_id: Optional[pulumi.Input[str]] = None,
                 home_efs_file_system_uid: Optional[pulumi.Input[str]] = None,
                 single_sign_on_user_identifier: Optional[pulumi.Input[str]] = None,
                 single_sign_on_user_value: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 user_profile_name: Optional[pulumi.Input[str]] = None,
                 user_settings: Optional[pulumi.Input['UserProfileUserSettingsArgs']] = None):
        """
        Input properties used for looking up and filtering UserProfile resources.
        :param pulumi.Input[str] arn: The user profile Amazon Resource Name (ARN).
        :param pulumi.Input[str] domain_id: The ID of the associated Domain.
        :param pulumi.Input[str] home_efs_file_system_uid: The ID of the user's profile in the Amazon Elastic File System (EFS) volume.
        :param pulumi.Input[str] single_sign_on_user_identifier: A specifier for the type of value specified in `single_sign_on_user_value`. Currently, the only supported value is `UserName`. If the Domain's AuthMode is SSO, this field is required. If the Domain's AuthMode is not SSO, this field cannot be specified.
        :param pulumi.Input[str] single_sign_on_user_value: The username of the associated AWS Single Sign-On User for this User Profile. If the Domain's AuthMode is SSO, this field is required, and must match a valid username of a user in your directory. If the Domain's AuthMode is not SSO, this field cannot be specified.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource.
        :param pulumi.Input[str] user_profile_name: The name for the User Profile.
        :param pulumi.Input['UserProfileUserSettingsArgs'] user_settings: The user settings. See User Settings below.
        """
        # Record only the values the caller actually supplied; each one is
        # stored under its snake_case key via pulumi.set.
        for key, value in (
                ("arn", arn),
                ("domain_id", domain_id),
                ("home_efs_file_system_uid", home_efs_file_system_uid),
                ("single_sign_on_user_identifier", single_sign_on_user_identifier),
                ("single_sign_on_user_value", single_sign_on_user_value),
                ("tags", tags),
                ("user_profile_name", user_profile_name),
                ("user_settings", user_settings)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter
    def arn(self) -> Optional[pulumi.Input[str]]:
        """The user profile Amazon Resource Name (ARN)."""
        return pulumi.get(self, "arn")

    @arn.setter
    def arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "arn", value)

    @property
    @pulumi.getter(name="domainId")
    def domain_id(self) -> Optional[pulumi.Input[str]]:
        """The ID of the associated Domain."""
        return pulumi.get(self, "domain_id")

    @domain_id.setter
    def domain_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "domain_id", value)

    @property
    @pulumi.getter(name="homeEfsFileSystemUid")
    def home_efs_file_system_uid(self) -> Optional[pulumi.Input[str]]:
        """The ID of the user's profile in the Amazon Elastic File System (EFS) volume."""
        return pulumi.get(self, "home_efs_file_system_uid")

    @home_efs_file_system_uid.setter
    def home_efs_file_system_uid(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "home_efs_file_system_uid", value)

    @property
    @pulumi.getter(name="singleSignOnUserIdentifier")
    def single_sign_on_user_identifier(self) -> Optional[pulumi.Input[str]]:
        """A specifier for the type of value specified in `single_sign_on_user_value`. Currently, the only supported value is `UserName`. If the Domain's AuthMode is SSO, this field is required. If the Domain's AuthMode is not SSO, this field cannot be specified."""
        return pulumi.get(self, "single_sign_on_user_identifier")

    @single_sign_on_user_identifier.setter
    def single_sign_on_user_identifier(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "single_sign_on_user_identifier", value)

    @property
    @pulumi.getter(name="singleSignOnUserValue")
    def single_sign_on_user_value(self) -> Optional[pulumi.Input[str]]:
        """The username of the associated AWS Single Sign-On User for this User Profile. If the Domain's AuthMode is SSO, this field is required, and must match a valid username of a user in your directory. If the Domain's AuthMode is not SSO, this field cannot be specified."""
        return pulumi.get(self, "single_sign_on_user_value")

    @single_sign_on_user_value.setter
    def single_sign_on_user_value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "single_sign_on_user_value", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """A map of tags to assign to the resource."""
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)

    @property
    @pulumi.getter(name="userProfileName")
    def user_profile_name(self) -> Optional[pulumi.Input[str]]:
        """The name for the User Profile."""
        return pulumi.get(self, "user_profile_name")

    @user_profile_name.setter
    def user_profile_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_profile_name", value)

    @property
    @pulumi.getter(name="userSettings")
    def user_settings(self) -> Optional[pulumi.Input['UserProfileUserSettingsArgs']]:
        """The user settings. See User Settings below."""
        return pulumi.get(self, "user_settings")

    @user_settings.setter
    def user_settings(self, value: Optional[pulumi.Input['UserProfileUserSettingsArgs']]):
        pulumi.set(self, "user_settings", value)
class UserProfile(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 domain_id: Optional[pulumi.Input[str]] = None,
                 single_sign_on_user_identifier: Optional[pulumi.Input[str]] = None,
                 single_sign_on_user_value: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 user_profile_name: Optional[pulumi.Input[str]] = None,
                 user_settings: Optional[pulumi.Input[pulumi.InputType['UserProfileUserSettingsArgs']]] = None,
                 __props__=None):
        """
        Provides a Sagemaker User Profile resource.
        ## Example Usage
        ### Basic usage
        ```python
        import pulumi
        import pulumi_aws as aws
        example = aws.sagemaker.UserProfile("example",
            domain_id=aws_sagemaker_domain["test"]["id"],
            user_profile_name="example")
        ```
        ## Import
        Sagemaker Code User Profiles can be imported using the `arn`, e.g.
        ```sh
        $ pulumi import aws:sagemaker/userProfile:UserProfile test_user_profile arn:aws:sagemaker:us-west-2:123456789012:user-profile/domain-id/profile-name
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] domain_id: The ID of the associated Domain.
        :param pulumi.Input[str] single_sign_on_user_identifier: A specifier for the type of value specified in `single_sign_on_user_value`. Currently, the only supported value is `UserName`. If the Domain's AuthMode is SSO, this field is required. If the Domain's AuthMode is not SSO, this field cannot be specified.
        :param pulumi.Input[str] single_sign_on_user_value: The username of the associated AWS Single Sign-On User for this User Profile. If the Domain's AuthMode is SSO, this field is required, and must match a valid username of a user in your directory. If the Domain's AuthMode is not SSO, this field cannot be specified.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource.
        :param pulumi.Input[str] user_profile_name: The name for the User Profile.
        :param pulumi.Input[pulumi.InputType['UserProfileUserSettingsArgs']] user_settings: The user settings. See User Settings below.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: UserProfileArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides a Sagemaker User Profile resource.
        ## Example Usage
        ### Basic usage
        ```python
        import pulumi
        import pulumi_aws as aws
        example = aws.sagemaker.UserProfile("example",
            domain_id=aws_sagemaker_domain["test"]["id"],
            user_profile_name="example")
        ```
        ## Import
        Sagemaker Code User Profiles can be imported using the `arn`, e.g.
        ```sh
        $ pulumi import aws:sagemaker/userProfile:UserProfile test_user_profile arn:aws:sagemaker:us-west-2:123456789012:user-profile/domain-id/profile-name
        ```
        :param str resource_name: The name of the resource.
        :param UserProfileArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Runtime dispatcher for the two @overload signatures above: if the
        # caller passed a UserProfileArgs object, unpack its fields into
        # keyword arguments; otherwise forward positional/keyword args as-is.
        resource_args, opts = _utilities.get_resource_args_opts(UserProfileArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 domain_id: Optional[pulumi.Input[str]] = None,
                 single_sign_on_user_identifier: Optional[pulumi.Input[str]] = None,
                 single_sign_on_user_value: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 user_profile_name: Optional[pulumi.Input[str]] = None,
                 user_settings: Optional[pulumi.Input[pulumi.InputType['UserProfileUserSettingsArgs']]] = None,
                 __props__=None):
        # Normalize resource options: default them, validate the type, and
        # pin the provider plugin version if the caller did not set one.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id is only set by get() lookups; for a fresh resource we must
        # build the property bag ourselves, so __props__ must not be supplied.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = UserProfileArgs.__new__(UserProfileArgs)
            # Required inputs — enforced unless this is a URN-only lookup.
            if domain_id is None and not opts.urn:
                raise TypeError("Missing required property 'domain_id'")
            __props__.__dict__["domain_id"] = domain_id
            __props__.__dict__["single_sign_on_user_identifier"] = single_sign_on_user_identifier
            __props__.__dict__["single_sign_on_user_value"] = single_sign_on_user_value
            __props__.__dict__["tags"] = tags
            if user_profile_name is None and not opts.urn:
                raise TypeError("Missing required property 'user_profile_name'")
            __props__.__dict__["user_profile_name"] = user_profile_name
            __props__.__dict__["user_settings"] = user_settings
            # Output-only properties are seeded with None; the engine fills
            # them in after the create/read completes.
            __props__.__dict__["arn"] = None
            __props__.__dict__["home_efs_file_system_uid"] = None
        super(UserProfile, __self__).__init__(
            'aws:sagemaker/userProfile:UserProfile',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            arn: Optional[pulumi.Input[str]] = None,
            domain_id: Optional[pulumi.Input[str]] = None,
            home_efs_file_system_uid: Optional[pulumi.Input[str]] = None,
            single_sign_on_user_identifier: Optional[pulumi.Input[str]] = None,
            single_sign_on_user_value: Optional[pulumi.Input[str]] = None,
            tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
            user_profile_name: Optional[pulumi.Input[str]] = None,
            user_settings: Optional[pulumi.Input[pulumi.InputType['UserProfileUserSettingsArgs']]] = None) -> 'UserProfile':
        """
        Get an existing UserProfile resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] arn: The user profile Amazon Resource Name (ARN).
        :param pulumi.Input[str] domain_id: The ID of the associated Domain.
        :param pulumi.Input[str] home_efs_file_system_uid: The ID of the user's profile in the Amazon Elastic File System (EFS) volume.
        :param pulumi.Input[str] single_sign_on_user_identifier: A specifier for the type of value specified in `single_sign_on_user_value`. Currently, the only supported value is `UserName`. If the Domain's AuthMode is SSO, this field is required. If the Domain's AuthMode is not SSO, this field cannot be specified.
        :param pulumi.Input[str] single_sign_on_user_value: The username of the associated AWS Single Sign-On User for this User Profile. If the Domain's AuthMode is SSO, this field is required, and must match a valid username of a user in your directory. If the Domain's AuthMode is not SSO, this field cannot be specified.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource.
        :param pulumi.Input[str] user_profile_name: The name for the User Profile.
        :param pulumi.Input[pulumi.InputType['UserProfileUserSettingsArgs']] user_settings: The user settings. See User Settings below.
        """
        # Attaching an id to opts makes _internal_init take the lookup path
        # (it skips property-bag construction when opts.id is set).
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _UserProfileState.__new__(_UserProfileState)
        __props__.__dict__["arn"] = arn
        __props__.__dict__["domain_id"] = domain_id
        __props__.__dict__["home_efs_file_system_uid"] = home_efs_file_system_uid
        __props__.__dict__["single_sign_on_user_identifier"] = single_sign_on_user_identifier
        __props__.__dict__["single_sign_on_user_value"] = single_sign_on_user_value
        __props__.__dict__["tags"] = tags
        __props__.__dict__["user_profile_name"] = user_profile_name
        __props__.__dict__["user_settings"] = user_settings
        return UserProfile(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def arn(self) -> pulumi.Output[str]:
        """
        The user profile Amazon Resource Name (ARN).
        """
        return pulumi.get(self, "arn")
    @property
    @pulumi.getter(name="domainId")
    def domain_id(self) -> pulumi.Output[str]:
        """
        The ID of the associated Domain.
        """
        return pulumi.get(self, "domain_id")
    @property
    @pulumi.getter(name="homeEfsFileSystemUid")
    def home_efs_file_system_uid(self) -> pulumi.Output[str]:
        """
        The ID of the user's profile in the Amazon Elastic File System (EFS) volume.
        """
        return pulumi.get(self, "home_efs_file_system_uid")
    @property
    @pulumi.getter(name="singleSignOnUserIdentifier")
    def single_sign_on_user_identifier(self) -> pulumi.Output[Optional[str]]:
        """
        A specifier for the type of value specified in `single_sign_on_user_value`. Currently, the only supported value is `UserName`. If the Domain's AuthMode is SSO, this field is required. If the Domain's AuthMode is not SSO, this field cannot be specified.
        """
        return pulumi.get(self, "single_sign_on_user_identifier")
    @property
    @pulumi.getter(name="singleSignOnUserValue")
    def single_sign_on_user_value(self) -> pulumi.Output[Optional[str]]:
        """
        The username of the associated AWS Single Sign-On User for this User Profile. If the Domain's AuthMode is SSO, this field is required, and must match a valid username of a user in your directory. If the Domain's AuthMode is not SSO, this field cannot be specified.
        """
        return pulumi.get(self, "single_sign_on_user_value")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        A map of tags to assign to the resource.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter(name="userProfileName")
    def user_profile_name(self) -> pulumi.Output[str]:
        """
        The name for the User Profile.
        """
        return pulumi.get(self, "user_profile_name")
    @property
    @pulumi.getter(name="userSettings")
    def user_settings(self) -> pulumi.Output[Optional['outputs.UserProfileUserSettings']]:
        """
        The user settings. See User Settings below.
        """
        return pulumi.get(self, "user_settings")
| 49.561077
| 324
| 0.676289
| 3,125
| 23,938
| 4.91904
| 0.06048
| 0.076568
| 0.071949
| 0.079105
| 0.88401
| 0.869568
| 0.857143
| 0.840424
| 0.8293
| 0.814143
| 0
| 0.001462
| 0.228382
| 23,938
| 482
| 325
| 49.6639
| 0.830762
| 0.358844
| 0
| 0.695167
| 1
| 0
| 0.134971
| 0.076815
| 0
| 0
| 0
| 0
| 0
| 1
| 0.159851
| false
| 0.003717
| 0.026022
| 0
| 0.282528
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
db7a1ccd38ea3db0037170872ea1278af1e83ca4
| 8,405
|
py
|
Python
|
src/models/cnn.py
|
GregoireThoumyre/Bathymetry-Inversion
|
cae383c888e4f14fa43b4adfbf5eba215b8b5f59
|
[
"MIT"
] | null | null | null |
src/models/cnn.py
|
GregoireThoumyre/Bathymetry-Inversion
|
cae383c888e4f14fa43b4adfbf5eba215b8b5f59
|
[
"MIT"
] | 9
|
2020-01-28T22:57:32.000Z
|
2022-02-10T00:14:27.000Z
|
src/models/cnn.py
|
GregoireThoumyre/Bathymetry-Inversion
|
cae383c888e4f14fa43b4adfbf5eba215b8b5f59
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
File: cnn.py |
Created on the 2019-02-22 |
Github: https://github.com/pl19n72019
This file contains the different models of cnn.
"""
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from keras.models import Sequential
class Model1:
    """A first CNN model.

    The network is convolutional, adapted to the problem: inputs are
    two-dimensional timestacks, and the output size equals the number of
    points in the discretization of the domain. It stacks three
    convolution/max-pooling stages, then a flatten layer feeding a single
    fully-connected output layer.

    Examples:
        >>> cnn = Model1((50, 100, 1), 200)
        >>> print(cnn.model().summary())
    """

    def __init__(self, input_shape, output_size):
        """Create the model and assemble its layers.

        Args:
            input_shape (tuple): Input shape of the model, typically (_, _, 1).
            output_size (int): Output size of the model (discretization of
                the domain).

        Note:
            The model can be used as a template. The headers and the
            specifications need to be fulfilled.
        """
        self.__cnn = Sequential(name="CNN")
        self.__assemble(input_shape, output_size)

    def __assemble(self, input_shape, output_size):
        """Stack the layers of the network.

        Three conv/pool stages (64, 128, 256 filters) followed by a flatten
        layer and the dense output layer.

        Args:
            input_shape (tuple): Input shape of the model, typically (_, _, 1).
            output_size (int): Output size of the model (discretization of
                the domain).
        """
        add = self.__cnn.add
        # First stage carries the input shape; later stages infer it.
        add(Conv2D(64, (2, 2), activation='relu', padding='same',
                   input_shape=input_shape))
        add(MaxPooling2D((2, 2)))
        for filters, kernel in ((128, (2, 2)), (256, (3, 3))):
            add(Conv2D(filters, kernel, activation='relu', padding='same'))
            add(MaxPooling2D((2, 2)))
        add(Flatten())
        add(Dense(units=output_size))

    def model(self):
        """Return the underlying keras ``Sequential`` model.

        All keras methods (compile, fit, ...) can be called on the returned
        object.
        """
        return self.__cnn
class Model2:
    """A second, deeper CNN model.

    The network is convolutional, adapted to the problem: inputs are
    two-dimensional timestacks, and the output size equals the number of
    points in the discretization of the domain. It stacks four double-
    convolution blocks (each ending in max-pooling and dropout) — eight
    convolutional layers in total — followed by a flatten layer and two
    fully-connected layers.

    Examples:
        >>> cnn = Model2((50, 100, 1), 200)
        >>> print(cnn.model().summary())
    """

    def __init__(self, input_shape, output_size):
        """Create the model and assemble its layers.

        Args:
            input_shape (tuple): Input shape of the model, typically (_, _, 1).
            output_size (int): Output size of the model (discretization of
                the domain).

        Note:
            The model can be used as a template. The headers and the
            specifications need to be fulfilled.
        """
        self.__cnn = Sequential(name="DCNN")
        self.__assemble(input_shape, output_size)

    def __assemble(self, input_shape, output_size):
        """Stack the layers of the network.

        Eight convolutional layers in four blocks, followed by a flatten
        layer and two dense layers.

        Args:
            input_shape (tuple): Input shape of the model, typically (_, _, 1).
            output_size (int): Output size of the model (discretization of
                the domain).
        """
        add = self.__cnn.add
        # One row per block: (filters, first kernel, second kernel).
        blocks = ((64, (2, 2), (2, 2)),
                  (128, (2, 2), (2, 2)),
                  (256, (3, 3), (2, 2)),
                  (512, (3, 3), (2, 2)))
        for index, (filters, first_kernel, second_kernel) in enumerate(blocks):
            # Only the very first layer needs the explicit input shape.
            head = {'input_shape': input_shape} if index == 0 else {}
            add(Conv2D(filters, first_kernel, activation='relu',
                       padding='same', **head))
            add(Conv2D(filters, second_kernel, activation='relu',
                       padding='same'))
            add(MaxPooling2D((2, 2)))
            add(Dropout(0.25))
        add(Flatten())
        add(Dense(300))
        add(Dense(units=output_size))

    def model(self):
        """Return the underlying keras ``Sequential`` model.

        All keras methods (compile, fit, ...) can be called on the returned
        object.
        """
        return self.__cnn
class Model3:
    """A MLP (Deep Feed Forward Neural Network) model.

    Unlike Model1/Model2 this network has no convolutional layers: it is
    three Dense(64, relu) layers, each followed by Dropout(0.2), then a
    flatten layer and the dense output layer sized to the discretization
    of the domain.

    Examples:
        >>> mlp = Model3((50, 100, 1), 200)
        >>> print(mlp.model().summary())
    """

    def __init__(self, input_shape, output_size):
        """Create the model and assemble its layers.

        Args:
            input_shape (tuple): Input shape of the model, typically (_, _, 1).
            output_size (int): Output size of the model (discretization of
                the domain).

        Note:
            The model can be used as a template. The headers and the
            specifications need to be fulfilled.
        """
        self.__cnn = Sequential(name="MLP")
        self.__assemble(input_shape, output_size)

    def __assemble(self, input_shape, output_size):
        """Stack the layers of the network.

        Three dense/dropout stages, then a flatten layer and the dense
        output layer.

        Args:
            input_shape (tuple): Input shape of the model, typically (_, _, 1).
            output_size (int): Output size of the model (discretization of
                the domain).
        """
        add = self.__cnn.add
        add(Dense(64, activation='relu', input_shape=input_shape))
        add(Dropout(0.2))
        for _ in range(2):
            add(Dense(64, activation='relu'))
            add(Dropout(0.2))
        add(Flatten())
        add(Dense(units=output_size))

    def model(self):
        """Return the underlying keras ``Sequential`` model.

        All keras methods (compile, fit, ...) can be called on the returned
        object.
        """
        return self.__cnn
| 33.62
| 102
| 0.621416
| 1,135
| 8,405
| 4.452863
| 0.125991
| 0.060942
| 0.075188
| 0.054412
| 0.930946
| 0.929165
| 0.929165
| 0.914919
| 0.914919
| 0.914919
| 0
| 0.027636
| 0.27674
| 8,405
| 249
| 103
| 33.75502
| 0.803751
| 0.546817
| 0
| 0.766667
| 0
| 0
| 0.035889
| 0
| 0
| 0
| 0
| 0.004016
| 0
| 1
| 0.15
| false
| 0
| 0.033333
| 0
| 0.283333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
91762683cfd0266afef1e7a86c3839b60fc78716
| 2,968
|
py
|
Python
|
test/testwhile.py
|
mvz/vb2py
|
6ea046f6fc202527a1b3fcd3ef5a67b969dea715
|
[
"BSD-3-Clause"
] | 2
|
2015-12-01T10:52:36.000Z
|
2021-04-20T05:15:01.000Z
|
test/testwhile.py
|
mvz/vb2py
|
6ea046f6fc202527a1b3fcd3ef5a67b969dea715
|
[
"BSD-3-Clause"
] | 4
|
2016-07-18T18:28:24.000Z
|
2016-07-19T08:30:14.000Z
|
test/testwhile.py
|
mvz/vb2py
|
6ea046f6fc202527a1b3fcd3ef5a67b969dea715
|
[
"BSD-3-Clause"
] | 3
|
2015-07-15T21:08:19.000Z
|
2021-02-25T09:39:12.000Z
|
from testframework import *

# Each entry appended to `tests` (defined by testframework) pairs a VB
# source snippet with the expected final variable bindings after execution.
# << While tests >> (1 of 5)
# Simple while / wend
tests.append(("""
a = 0
b = 0
While a<=10
b = b + a
a = a + 1
Wend
""", {"a" : 11, "b" : 55}))
# Nested While
tests.append(("""
a = 0
b = 0
While a<=10
c = 0
While c < 10
c = c + 1
b = b + 1
Wend
b = b + a
a = a + 1
Wend
""", {"a" : 11, "b" : 165, "c" : 10}))
# << While tests >> (2 of 5)
# Simple do while loop
tests.append(("""
a = 0
b = 0
Do While a<=10
b = b + a
a = a + 1
Loop
""", {"a" : 11, "b" : 55}))
# Simple do while loop with exit
tests.append(("""
a = 1
b = 0
Do While a<=10
b = b + a
a = a + 1
Exit Do
Loop
""", {"a" : 2, "b" : 1}))
# Nested Do While Loop
tests.append(("""
a = 0
b = 0
Do While a<=10
c = 0
Do While c < 10
c = c + 1
b = b + 1
Loop
b = b + a
a = a + 1
Loop
""", {"a" : 11, "b" : 165, "c" : 10}))
# Nested Do While Loop With inner exit
tests.append(("""
a = 0
b = 0
Do While a<=10
c = 0
Do While c < 10
c = c + 1
b = b + 1
Exit Do
Loop
b = b + a
a = a + 1
Loop
""", {"a" : 11, "b" : 66, "c" : 1}))
# Nested Do While Loop With outer exit
tests.append(("""
a = 0
b = 0
Do While a<=10
c = 0
Do While c < 10
c = c + 1
b = b + 1
Loop
b = b + a
a = a + 1
Exit Do
Loop
""", {"a" : 1, "b" : 10, "c" : 10}))
# << While tests >> (3 of 5)
# Simple do while loop with exit
tests.append(("""
a = 1
b = 0
Do
b = b + a
a = a + 1
Exit Do
Loop
""", {"a" : 2, "b" : 1}))
# Nested Do While Loop With inner exit
tests.append(("""
a = 0
b = 0
Do
c = 0
Do While c < 10
c = c + 1
b = b + 1
Exit Do
Loop
b = b + a
a = a + 1
Exit Do
Loop
""", {"a" : 1, "b" : 1, "c" : 1}))
# Nested Do While Loop With outer exit
tests.append(("""
a = 0
b = 0
Do
c = 0
Do While c < 10
c = c + 1
b = b + 1
Loop
b = b + a
a = a + 1
Exit Do
Loop
""", {"a" : 1, "b" : 10, "c" : 10}))
# << While tests >> (4 of 5)
# Simple do until (post-tested: condition checked after the body)
tests.append(("""
a = 1
b = 0
Do
b = b + a
a = a + 1
Loop Until a > 10
""", {"a" : 11, "b" : 55}))
# Nested Do Until
tests.append(("""
a = 0
b = 0
Do
c = 0
Do
c = c + 1
b = b + 1
Loop Until c > 10
b = b + a
a = a + 1
Loop Until a > 10
""", {"a" : 11, "b" : 176, "c" : 11}))
# << While tests >> (5 of 5)
# Simple do until (pre-tested: condition checked before the body)
tests.append(("""
a = 1
b = 0
Do Until a > 10
b = b + a
a = a + 1
Loop
""", {"a" : 11, "b" : 55}))
# Nested Do Until
tests.append(("""
a = 0
b = 0
Do Until a > 10
c = 0
Do Until c > 10
c = c + 1
b = b + 1
Loop
b = b + a
a = a + 1
Loop
""", {"a" : 11, "b" : 176, "c" : 11}))
# -- end -- << While tests >>

# Build the unittest TestCase from the accumulated (source, expected) pairs.
import vb2py.vbparser
vb2py.vbparser.log.setLevel(0) # Don't print all logging stuff
TestClass = addTestsTo(BasicTest, tests)
if __name__ == "__main__":
    main()
| 14.766169
| 62
| 0.434973
| 534
| 2,968
| 2.402622
| 0.09176
| 0.043648
| 0.130943
| 0.043648
| 0.823071
| 0.812159
| 0.773967
| 0.766173
| 0.764614
| 0.717849
| 0
| 0.099133
| 0.378032
| 2,968
| 200
| 63
| 14.84
| 0.595883
| 0.182278
| 0
| 0.933333
| 0
| 0
| 0.644075
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.012121
| 0
| 0.012121
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
91b2714ba0778f35f60b0b6bcd511c0e6cb7b602
| 44,984
|
py
|
Python
|
tests/test_operationdefinition.py
|
glichtner/fhir.resources
|
94896d8f8a0b7dd69253762aab968f4fd6eb69a0
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_operationdefinition.py
|
glichtner/fhir.resources
|
94896d8f8a0b7dd69253762aab968f4fd6eb69a0
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_operationdefinition.py
|
glichtner/fhir.resources
|
94896d8f8a0b7dd69253762aab968f4fd6eb69a0
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/OperationDefinition
Release: R5
Version: 4.5.0
Build ID: 0d95498
Last updated: 2021-04-03T00:34:11.075+00:00
"""
from pydantic.validators import bytes_validator # noqa: F401
from fhir.resources import fhirtypes # noqa: F401
from fhir.resources import operationdefinition
def impl_operationdefinition_1(inst):
    """Field-by-field checks for the Measure-data-requirements fixture.

    ``inst`` is a parsed ``OperationDefinition`` resource (see
    ``test_operationdefinition_1``); every expected literal below mirrors
    operation-measure-data-requirements.json exactly.
    """
    assert inst.affectsState is False
    assert inst.code == "data-requirements"
    assert inst.contact[0].telecom[0].system == "url"
    assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
    assert inst.contact[0].telecom[1].system == "email"
    assert inst.contact[0].telecom[1].value == "fhir@lists.hl7.org"
    assert inst.date == fhirtypes.DateTime.validate("2021-04-03T00:34:11+00:00")
    assert inst.description == (
        "The data-requirements operation aggregates and returns the "
        "parameters and data requirements for the measure and all its"
        " dependencies as a single module definition"
    )
    assert inst.extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "fmm"
    )
    assert inst.extension[0].valueInteger == 3
    assert inst.extension[1].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-"
        "standards-status"
    )
    assert inst.extension[1].valueCode == "trial-use"
    assert inst.id == "Measure-data-requirements"
    assert inst.instance is True
    assert inst.kind == "operation"
    assert inst.name == "data-requirements"
    # Input parameters: periodStart / periodEnd (both required dates).
    assert inst.parameter[0].max == "1"
    assert inst.parameter[0].min == 1
    assert inst.parameter[0].name == "periodStart"
    assert inst.parameter[0].type == "date"
    assert inst.parameter[0].use == "in"
    assert inst.parameter[1].documentation == (
        "The end of the measurement period. The period will end at "
        "the end of the period implied by the supplied timestamp. "
        "E.g. a value of 2014 would set the period end to be "
        "2014-12-31T23:59:59 inclusive"
    )
    assert inst.parameter[1].max == "1"
    assert inst.parameter[1].min == 1
    assert inst.parameter[1].name == "periodEnd"
    assert inst.parameter[1].type == "date"
    assert inst.parameter[1].use == "in"
    assert inst.parameter[2].documentation == (
        "The result of the requirements gathering is a module-"
        "definition Library that describes the aggregate parameters, "
        "data requirements, and dependencies of the measure"
    )
    # Output parameter: the aggregated module-definition Library.
    assert inst.parameter[2].max == "1"
    assert inst.parameter[2].min == 1
    assert inst.parameter[2].name == "return"
    assert inst.parameter[2].type == "Library"
    assert inst.parameter[2].use == "out"
    assert inst.publisher == "HL7 (FHIR Project)"
    assert inst.resource[0] == "Measure"
    assert inst.status == "draft"
    assert inst.system is False
    assert inst.text.status == "extensions"
    assert inst.title == "Data Requirements"
    assert inst.type is False
    assert inst.url == (
        "http://hl7.org/fhir/OperationDefinition/Measure-data-" "requirements"
    )
    assert inst.version == "4.5.0"
def test_operationdefinition_1(base_settings):
    """No. 1 tests collection for OperationDefinition.
    Test File: operation-measure-data-requirements.json
    """
    source = (
        base_settings["unittest_data_dir"] / "operation-measure-data-requirements.json"
    )
    resource = operationdefinition.OperationDefinition.parse_file(
        source, content_type="application/json", encoding="utf-8"
    )
    assert resource.resource_type == "OperationDefinition"
    impl_operationdefinition_1(resource)
    # Round-trip: serialise to a dict, rebuild the resource, re-check fields.
    payload = resource.dict()
    assert payload["resourceType"] == "OperationDefinition"
    rebuilt = operationdefinition.OperationDefinition(**payload)
    impl_operationdefinition_1(rebuilt)
def impl_operationdefinition_2(inst):
    """Assert every expected field value of the PlanDefinition-apply
    OperationDefinition example (operation-plandefinition-apply.json).

    Called twice by test_operationdefinition_2: once on the instance parsed
    from the example file and once on an instance rebuilt from its dict form.
    """
    assert inst.affectsState is False
    assert inst.code == "apply"
    assert inst.contact[0].telecom[0].system == "url"
    assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
    assert inst.contact[0].telecom[1].system == "email"
    assert inst.contact[0].telecom[1].value == "fhir@lists.hl7.org"
    assert inst.date == fhirtypes.DateTime.validate("2021-04-03T00:34:11+00:00")
    assert inst.description == (
        "The apply operation applies a PlanDefinition to a given " "context"
    )
    assert inst.extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "fmm"
    )
    assert inst.extension[0].valueInteger == 3
    assert inst.extension[1].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-"
        "standards-status"
    )
    assert inst.extension[1].valueCode == "trial-use"
    assert inst.id == "PlanDefinition-apply"
    assert inst.instance is True
    assert inst.kind == "operation"
    assert inst.name == "apply"
    assert inst.parameter[0].documentation == (
        "The plan definition to be applied. If the operation is "
        "invoked at the instance level, this parameter is not "
        "allowed; if the operation is invoked at the type level, this"
        " parameter is required"
    )
    assert inst.parameter[0].max == "1"
    assert inst.parameter[0].min == 0
    assert inst.parameter[0].name == "planDefinition"
    assert inst.parameter[0].type == "PlanDefinition"
    assert inst.parameter[0].use == "in"
    assert inst.parameter[1].max == "*"
    assert inst.parameter[1].min == 1
    assert inst.parameter[1].name == "subject"
    assert inst.parameter[1].searchType == "reference"
    assert inst.parameter[1].type == "string"
    assert inst.parameter[1].use == "in"
    assert inst.parameter[2].documentation == "The encounter in context, if any"
    assert inst.parameter[2].max == "1"
    assert inst.parameter[2].min == 0
    assert inst.parameter[2].name == "encounter"
    assert inst.parameter[2].searchType == "reference"
    assert inst.parameter[2].type == "string"
    assert inst.parameter[2].use == "in"
    assert (
        inst.parameter[3].documentation
        == "The practitioner applying the plan definition"
    )
    assert inst.parameter[3].max == "1"
    assert inst.parameter[3].min == 0
    assert inst.parameter[3].name == "practitioner"
    assert inst.parameter[3].searchType == "reference"
    assert inst.parameter[3].type == "string"
    assert inst.parameter[3].use == "in"
    assert (
        inst.parameter[4].documentation
        == "The organization applying the plan definition"
    )
    assert inst.parameter[4].max == "1"
    assert inst.parameter[4].min == 0
    assert inst.parameter[4].name == "organization"
    assert inst.parameter[4].searchType == "reference"
    assert inst.parameter[4].type == "string"
    assert inst.parameter[4].use == "in"
    assert inst.parameter[5].documentation == (
        "The type of user initiating the request, e.g. patient, "
        "healthcare provider, or specific type of healthcare provider"
        " (physician, nurse, etc.)"
    )
    assert inst.parameter[5].max == "1"
    assert inst.parameter[5].min == 0
    assert inst.parameter[5].name == "userType"
    assert inst.parameter[5].type == "CodeableConcept"
    assert inst.parameter[5].use == "in"
    assert (
        inst.parameter[6].documentation
        == "Preferred language of the person using the system"
    )
    assert inst.parameter[6].max == "1"
    assert inst.parameter[6].min == 0
    assert inst.parameter[6].name == "userLanguage"
    assert inst.parameter[6].type == "CodeableConcept"
    assert inst.parameter[6].use == "in"
    assert inst.parameter[7].max == "1"
    assert inst.parameter[7].min == 0
    assert inst.parameter[7].name == "userTaskContext"
    assert inst.parameter[7].type == "CodeableConcept"
    assert inst.parameter[7].use == "in"
    assert inst.parameter[8].documentation == (
        "The current setting of the request (inpatient, outpatient, " "etc.)"
    )
    assert inst.parameter[8].max == "1"
    assert inst.parameter[8].min == 0
    assert inst.parameter[8].name == "setting"
    assert inst.parameter[8].type == "CodeableConcept"
    assert inst.parameter[8].use == "in"
    assert (
        inst.parameter[9].documentation
        == "Additional detail about the setting of the request, if any"
    )
    assert inst.parameter[9].max == "1"
    assert inst.parameter[9].min == 0
    assert inst.parameter[9].name == "settingContext"
    assert inst.parameter[9].type == "CodeableConcept"
    assert inst.parameter[9].use == "in"
    assert inst.publisher == "HL7 (FHIR Project)"
    assert inst.resource[0] == "PlanDefinition"
    assert inst.status == "draft"
    assert inst.system is False
    assert inst.text.status == "extensions"
    assert inst.title == "Apply"
    assert inst.type is True
    assert inst.url == "http://hl7.org/fhir/OperationDefinition/PlanDefinition-apply"
    assert inst.version == "4.5.0"
def test_operationdefinition_2(base_settings):
    """No. 2 tests collection for OperationDefinition.
    Test File: operation-plandefinition-apply.json
    """
    json_path = (
        base_settings["unittest_data_dir"] / "operation-plandefinition-apply.json"
    )
    parsed = operationdefinition.OperationDefinition.parse_file(
        json_path, content_type="application/json", encoding="utf-8"
    )
    assert parsed.resource_type == "OperationDefinition"
    impl_operationdefinition_2(parsed)
    # Round-trip check: serialize to a dict, rebuild, and re-assert everything.
    payload = parsed.dict()
    assert payload["resourceType"] == "OperationDefinition"
    rebuilt = operationdefinition.OperationDefinition(**payload)
    impl_operationdefinition_2(rebuilt)
def impl_operationdefinition_3(inst):
    """Assert every expected field value of the StructureDefinition-snapshot
    OperationDefinition example (operation-structuredefinition-snapshot.json).

    Called twice by test_operationdefinition_3: once on the instance parsed
    from the example file and once on an instance rebuilt from its dict form.
    """
    assert inst.affectsState is True
    assert inst.code == "snapshot"
    assert inst.contact[0].telecom[0].system == "url"
    assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
    assert inst.contact[0].telecom[1].system == "email"
    assert inst.contact[0].telecom[1].value == "fhir@lists.hl7.org"
    assert inst.date == fhirtypes.DateTime.validate("2021-04-03T00:34:11+00:00")
    assert inst.extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "fmm"
    )
    assert inst.extension[0].valueInteger == 5
    assert inst.extension[1].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-"
        "standards-status"
    )
    assert inst.extension[1].valueCode == "trial-use"
    assert inst.id == "StructureDefinition-snapshot"
    assert inst.instance is True
    assert inst.kind == "operation"
    assert inst.name == "snapshot"
    assert inst.parameter[0].documentation == (
        "The [StructureDefinition](structuredefinition.html) is "
        "provided directly as part of the request. Servers may choose"
        " not to accept profiles in this fashion"
    )
    assert inst.parameter[0].max == "1"
    assert inst.parameter[0].min == 0
    assert inst.parameter[0].name == "definition"
    assert inst.parameter[0].type == "StructureDefinition"
    assert inst.parameter[0].use == "in"
    assert inst.parameter[1].documentation == (
        "The StructureDefinition's canonical URL (i.e. "
        "'StructureDefinition.url'). The server must know the "
        "structure definition, or be able to retrieve it from other "
        "known repositories."
    )
    assert inst.parameter[1].max == "1"
    assert inst.parameter[1].min == 0
    assert inst.parameter[1].name == "url"
    assert inst.parameter[1].searchType == "token"
    assert inst.parameter[1].type == "string"
    assert inst.parameter[1].use == "in"
    assert inst.parameter[2].documentation == "The structure definition with a snapshot"
    assert inst.parameter[2].max == "1"
    assert inst.parameter[2].min == 1
    assert inst.parameter[2].name == "return"
    assert inst.parameter[2].type == "StructureDefinition"
    assert inst.parameter[2].use == "out"
    assert inst.publisher == "HL7 (FHIR Project)"
    assert inst.resource[0] == "StructureDefinition"
    assert inst.status == "draft"
    assert inst.system is False
    assert inst.text.status == "extensions"
    assert inst.title == "Generate Snapshot"
    assert inst.type is True
    assert inst.url == (
        "http://hl7.org/fhir/OperationDefinition/StructureDefinition-" "snapshot"
    )
    assert inst.version == "4.5.0"
def test_operationdefinition_3(base_settings):
    """No. 3 tests collection for OperationDefinition.
    Test File: operation-structuredefinition-snapshot.json
    """
    json_path = (
        base_settings["unittest_data_dir"]
        / "operation-structuredefinition-snapshot.json"
    )
    parsed = operationdefinition.OperationDefinition.parse_file(
        json_path, content_type="application/json", encoding="utf-8"
    )
    assert parsed.resource_type == "OperationDefinition"
    impl_operationdefinition_3(parsed)
    # Round-trip check: serialize to a dict, rebuild, and re-assert everything.
    payload = parsed.dict()
    assert payload["resourceType"] == "OperationDefinition"
    rebuilt = operationdefinition.OperationDefinition(**payload)
    impl_operationdefinition_3(rebuilt)
def impl_operationdefinition_4(inst):
    """Assert every expected field value of the CodeSystem-validate-code
    OperationDefinition example (operation-codesystem-validate-code.json).

    Called twice by test_operationdefinition_4: once on the instance parsed
    from the example file and once on an instance rebuilt from its dict form.
    """
    assert inst.affectsState is False
    assert inst.code == "validate-code"
    assert inst.contact[0].telecom[0].system == "url"
    assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
    assert inst.contact[0].telecom[1].system == "email"
    assert inst.contact[0].telecom[1].value == "fhir@lists.hl7.org"
    assert inst.date == fhirtypes.DateTime.validate("2021-04-03T00:34:11+00:00")
    assert inst.extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "fmm"
    )
    assert inst.extension[0].valueInteger == 5
    assert inst.extension[1].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-"
        "standards-status"
    )
    assert inst.extension[1].valueCode == "normative"
    assert inst.id == "CodeSystem-validate-code"
    assert inst.instance is True
    assert inst.kind == "operation"
    assert inst.name == "validate-code"
    assert inst.parameter[0].documentation == (
        "CodeSystem URL. The server must know the code system (e.g. "
        "it is defined explicitly in the server'scode systems, or it "
        "is known implicitly by the server"
    )
    assert inst.parameter[0].max == "1"
    assert inst.parameter[0].min == 0
    assert inst.parameter[0].name == "url"
    assert inst.parameter[0].type == "uri"
    assert inst.parameter[0].use == "in"
    assert inst.parameter[1].max == "1"
    assert inst.parameter[1].min == 0
    assert inst.parameter[1].name == "codeSystem"
    assert inst.parameter[1].type == "CodeSystem"
    assert inst.parameter[1].use == "in"
    assert inst.parameter[2].documentation == "The code that is to be validated"
    assert inst.parameter[2].max == "1"
    assert inst.parameter[2].min == 0
    assert inst.parameter[2].name == "code"
    assert inst.parameter[2].type == "code"
    assert inst.parameter[2].use == "in"
    assert inst.parameter[3].documentation == (
        "The version of the code system, if one was provided in the " "source data"
    )
    assert inst.parameter[3].max == "1"
    assert inst.parameter[3].min == 0
    assert inst.parameter[3].name == "version"
    assert inst.parameter[3].type == "string"
    assert inst.parameter[3].use == "in"
    assert inst.parameter[4].max == "1"
    assert inst.parameter[4].min == 0
    assert inst.parameter[4].name == "display"
    assert inst.parameter[4].type == "string"
    assert inst.parameter[4].use == "in"
    assert inst.parameter[5].documentation == (
        "A coding to validate. The system must match the specified " "code system"
    )
    assert inst.parameter[5].max == "1"
    assert inst.parameter[5].min == 0
    assert inst.parameter[5].name == "coding"
    assert inst.parameter[5].type == "Coding"
    assert inst.parameter[5].use == "in"
    assert inst.parameter[6].max == "1"
    assert inst.parameter[6].min == 0
    assert inst.parameter[6].name == "codeableConcept"
    assert inst.parameter[6].type == "CodeableConcept"
    assert inst.parameter[6].use == "in"
    assert inst.parameter[7].max == "1"
    assert inst.parameter[7].min == 0
    assert inst.parameter[7].name == "date"
    assert inst.parameter[7].type == "dateTime"
    assert inst.parameter[7].use == "in"
    assert inst.parameter[8].max == "1"
    assert inst.parameter[8].min == 0
    assert inst.parameter[8].name == "abstract"
    assert inst.parameter[8].type == "boolean"
    assert inst.parameter[8].use == "in"
    assert inst.parameter[9].documentation == (
        "Specifies the language to be used for description when "
        "validating the display property"
    )
    assert inst.parameter[9].max == "1"
    assert inst.parameter[9].min == 0
    assert inst.parameter[9].name == "displayLanguage"
    assert inst.parameter[9].type == "code"
    assert inst.parameter[9].use == "in"
    assert inst.publisher == "HL7 (FHIR Project)"
    assert inst.resource[0] == "CodeSystem"
    assert inst.status == "active"
    assert inst.system is False
    assert inst.text.status == "extensions"
    assert inst.title == "Code System based Validation"
    assert inst.type is True
    assert inst.url == (
        "http://hl7.org/fhir/OperationDefinition/CodeSystem-validate-" "code"
    )
    assert inst.version == "4.5.0"
def test_operationdefinition_4(base_settings):
    """No. 4 tests collection for OperationDefinition.
    Test File: operation-codesystem-validate-code.json
    """
    json_path = (
        base_settings["unittest_data_dir"] / "operation-codesystem-validate-code.json"
    )
    parsed = operationdefinition.OperationDefinition.parse_file(
        json_path, content_type="application/json", encoding="utf-8"
    )
    assert parsed.resource_type == "OperationDefinition"
    impl_operationdefinition_4(parsed)
    # Round-trip check: serialize to a dict, rebuild, and re-assert everything.
    payload = parsed.dict()
    assert payload["resourceType"] == "OperationDefinition"
    rebuilt = operationdefinition.OperationDefinition(**payload)
    impl_operationdefinition_4(rebuilt)
def impl_operationdefinition_5(inst):
    """Assert every expected field value of the Resource-validate
    OperationDefinition example (operation-resource-validate.json).

    Called twice by test_operationdefinition_5: once on the instance parsed
    from the example file and once on an instance rebuilt from its dict form.
    """
    assert inst.affectsState is False
    assert inst.code == "validate"
    assert inst.contact[0].telecom[0].system == "url"
    assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
    assert inst.contact[0].telecom[1].system == "email"
    assert inst.contact[0].telecom[1].value == "fhir@lists.hl7.org"
    assert inst.date == fhirtypes.DateTime.validate("2021-04-03T00:34:11+00:00")
    assert inst.extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "fmm"
    )
    assert inst.extension[0].valueInteger == 5
    assert inst.extension[1].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-"
        "standards-status"
    )
    assert inst.extension[1].valueCode == "normative"
    assert inst.id == "Resource-validate"
    assert inst.instance is True
    assert inst.kind == "operation"
    assert inst.name == "validate"
    assert (
        inst.parameter[0].documentation == 'Must be present unless the mode is "delete"'
    )
    assert inst.parameter[0].max == "1"
    assert inst.parameter[0].min == 0
    assert inst.parameter[0].name == "resource"
    assert inst.parameter[0].type == "Resource"
    assert inst.parameter[0].use == "in"
    assert inst.parameter[1].binding.extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/elementdefinition-" "bindingName"
    )
    assert (
        inst.parameter[1].binding.extension[0].valueString == "ResourceValidationMode"
    )
    assert inst.parameter[1].binding.strength == "required"
    assert inst.parameter[1].binding.valueSet == (
        "http://hl7.org/fhir/ValueSet/resource-validation-" "mode|4.5.0|4.5.0"
    )
    assert (
        inst.parameter[1].documentation
        == "Default is 'no action'; (e.g. general validation)"
    )
    assert inst.parameter[1].max == "1"
    assert inst.parameter[1].min == 0
    assert inst.parameter[1].name == "mode"
    assert inst.parameter[1].type == "code"
    assert inst.parameter[1].use == "in"
    assert inst.parameter[2].documentation == (
        "If this is nominated, then the resource is validated against"
        " this specific profile. If a profile is nominated, and the "
        "server cannot validate against the nominated profile, it "
        "SHALL return an error"
    )
    assert inst.parameter[2].max == "1"
    assert inst.parameter[2].min == 0
    assert inst.parameter[2].name == "profile"
    assert inst.parameter[2].type == "uri"
    assert inst.parameter[2].use == "in"
    assert inst.parameter[3].max == "1"
    assert inst.parameter[3].min == 1
    assert inst.parameter[3].name == "return"
    assert inst.parameter[3].type == "OperationOutcome"
    assert inst.parameter[3].use == "out"
    assert inst.publisher == "HL7 (FHIR Project)"
    assert inst.resource[0] == "Resource"
    assert inst.status == "active"
    assert inst.system is False
    assert inst.text.status == "extensions"
    assert inst.title == "Validate a resource"
    assert inst.type is True
    assert inst.url == "http://hl7.org/fhir/OperationDefinition/Resource-validate"
    assert inst.version == "4.5.0"
def test_operationdefinition_5(base_settings):
    """No. 5 tests collection for OperationDefinition.
    Test File: operation-resource-validate.json
    """
    json_path = base_settings["unittest_data_dir"] / "operation-resource-validate.json"
    parsed = operationdefinition.OperationDefinition.parse_file(
        json_path, content_type="application/json", encoding="utf-8"
    )
    assert parsed.resource_type == "OperationDefinition"
    impl_operationdefinition_5(parsed)
    # Round-trip check: serialize to a dict, rebuild, and re-assert everything.
    payload = parsed.dict()
    assert payload["resourceType"] == "OperationDefinition"
    rebuilt = operationdefinition.OperationDefinition(**payload)
    impl_operationdefinition_5(rebuilt)
def impl_operationdefinition_6(inst):
    """Assert every expected field value of the StructureDefinition-questionnaire
    OperationDefinition example (operation-structuredefinition-questionnaire.json).

    Called twice by test_operationdefinition_6: once on the instance parsed
    from the example file and once on an instance rebuilt from its dict form.
    """
    assert inst.affectsState is False
    assert inst.code == "questionnaire"
    assert inst.contact[0].telecom[0].system == "url"
    assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
    assert inst.contact[0].telecom[1].system == "email"
    assert inst.contact[0].telecom[1].value == "fhir@lists.hl7.org"
    assert inst.date == fhirtypes.DateTime.validate("2021-04-03T00:34:11+00:00")
    assert inst.extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "fmm"
    )
    assert inst.extension[0].valueInteger == 5
    assert inst.extension[1].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-"
        "standards-status"
    )
    assert inst.extension[1].valueCode == "trial-use"
    assert inst.id == "StructureDefinition-questionnaire"
    assert inst.instance is True
    assert inst.kind == "operation"
    assert inst.name == "questionnaire"
    assert inst.parameter[0].documentation == (
        "A logical identifier (i.e. "
        "'StructureDefinition.identifier''). The server must know the"
        " StructureDefinition or be able to retrieve it from other "
        "known repositories."
    )
    assert inst.parameter[0].max == "1"
    assert inst.parameter[0].min == 0
    assert inst.parameter[0].name == "identifier"
    assert (
        inst.parameter[0].targetProfile[0]
        == "http://hl7.org/fhir/StructureDefinition/StructureDefinition"
    )
    assert inst.parameter[0].type == "Identifier"
    assert inst.parameter[0].use == "in"
    assert inst.parameter[1].documentation == (
        "The [StructureDefinition](structuredefinition.html) is "
        "provided directly as part of the request. Servers may choose"
        " not to accept profiles in this fashion"
    )
    assert inst.parameter[1].max == "1"
    assert inst.parameter[1].min == 0
    assert inst.parameter[1].name == "profile"
    assert inst.parameter[1].searchType == "token"
    assert inst.parameter[1].type == "StructureDefinition"
    assert inst.parameter[1].use == "in"
    assert inst.parameter[2].documentation == (
        "The StructureDefinition's official URL (i.e. "
        "'StructureDefinition.url'). The server must know the "
        "StructureDefinition or be able to retrieve it from other "
        "known repositories."
    )
    assert inst.parameter[2].max == "1"
    assert inst.parameter[2].min == 0
    assert inst.parameter[2].name == "url"
    assert (
        inst.parameter[2].targetProfile[0]
        == "http://hl7.org/fhir/StructureDefinition/StructureDefinition"
    )
    assert inst.parameter[2].type == "canonical"
    assert inst.parameter[2].use == "in"
    assert inst.parameter[3].documentation == (
        "If true, the questionnaire will only include those elements "
        "marked as \"mustSupport='true'\" in the StructureDefinition."
    )
    assert inst.parameter[3].max == "1"
    assert inst.parameter[3].min == 0
    assert inst.parameter[3].name == "supportedOnly"
    assert inst.parameter[3].type == "boolean"
    assert inst.parameter[3].use == "in"
    assert inst.parameter[4].documentation == (
        "The questionnaire form generated based on the " "StructureDefinition."
    )
    assert inst.parameter[4].max == "1"
    assert inst.parameter[4].min == 1
    assert inst.parameter[4].name == "return"
    assert inst.parameter[4].type == "Questionnaire"
    assert inst.parameter[4].use == "out"
    assert inst.publisher == "HL7 (FHIR Project)"
    assert inst.resource[0] == "StructureDefinition"
    assert inst.status == "draft"
    assert inst.system is False
    assert inst.text.status == "extensions"
    assert inst.title == "Build Questionnaire"
    assert inst.type is True
    assert inst.url == (
        "http://hl7.org/fhir/OperationDefinition/StructureDefinition-" "questionnaire"
    )
    assert inst.version == "4.5.0"
def test_operationdefinition_6(base_settings):
    """No. 6 tests collection for OperationDefinition.
    Test File: operation-structuredefinition-questionnaire.json
    """
    json_path = (
        base_settings["unittest_data_dir"]
        / "operation-structuredefinition-questionnaire.json"
    )
    parsed = operationdefinition.OperationDefinition.parse_file(
        json_path, content_type="application/json", encoding="utf-8"
    )
    assert parsed.resource_type == "OperationDefinition"
    impl_operationdefinition_6(parsed)
    # Round-trip check: serialize to a dict, rebuild, and re-assert everything.
    payload = parsed.dict()
    assert payload["resourceType"] == "OperationDefinition"
    rebuilt = operationdefinition.OperationDefinition(**payload)
    impl_operationdefinition_6(rebuilt)
def impl_operationdefinition_7(inst):
    """Assert every expected field value of the NamingSystem-translate-id
    OperationDefinition example (operation-namingsystem-translate-id.json).

    Called twice by test_operationdefinition_7: once on the instance parsed
    from the example file and once on an instance rebuilt from its dict form.
    """
    assert inst.affectsState is False
    assert inst.code == "translate-id"
    assert inst.contact[0].telecom[0].system == "url"
    assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
    assert inst.contact[0].telecom[1].system == "email"
    assert inst.contact[0].telecom[1].value == "fhir@lists.hl7.org"
    assert inst.date == fhirtypes.DateTime.validate("2021-04-03T00:34:11+00:00")
    assert inst.extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "fmm"
    )
    assert inst.extension[0].valueInteger == 1
    assert inst.extension[1].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-"
        "standards-status"
    )
    assert inst.extension[1].valueCode == "trial-use"
    assert inst.id == "NamingSystem-translate-id"
    assert inst.instance is False
    assert inst.kind == "operation"
    assert inst.name == "translate-id"
    assert inst.parameter[0].max == "1"
    assert inst.parameter[0].min == 1
    assert inst.parameter[0].name == "id"
    assert inst.parameter[0].type == "string"
    assert inst.parameter[0].use == "in"
    assert inst.parameter[1].binding.extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/elementdefinition-" "bindingName"
    )
    assert (
        inst.parameter[1].binding.extension[0].valueString
        == "NamingSystemIdentifierType"
    )
    assert inst.parameter[1].binding.strength == "required"
    assert inst.parameter[1].binding.valueSet == (
        "http://hl7.org/fhir/ValueSet/namingsystem-identifier-" "type|4.5.0|4.5.0"
    )
    assert inst.parameter[1].max == "1"
    assert inst.parameter[1].min == 1
    assert inst.parameter[1].name == "sourceType"
    assert inst.parameter[1].type == "code"
    assert inst.parameter[1].use == "in"
    assert inst.parameter[2].binding.extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/elementdefinition-" "bindingName"
    )
    assert (
        inst.parameter[2].binding.extension[0].valueString
        == "NamingSystemIdentifierType"
    )
    assert inst.parameter[2].binding.strength == "required"
    assert inst.parameter[2].binding.valueSet == (
        "http://hl7.org/fhir/ValueSet/namingsystem-identifier-" "type|4.5.0|4.5.0"
    )
    assert inst.parameter[2].max == "1"
    assert inst.parameter[2].min == 1
    assert inst.parameter[2].name == "targetType"
    assert inst.parameter[2].type == "code"
    assert inst.parameter[2].use == "in"
    assert inst.parameter[3].documentation == (
        "If preferredOnly = true then return only the preferred "
        "identifier, or if preferredOnly = false then return all "
        "available ids."
    )
    assert inst.parameter[3].max == "1"
    assert inst.parameter[3].min == 0
    assert inst.parameter[3].name == "preferredOnly"
    assert inst.parameter[3].type == "boolean"
    assert inst.parameter[3].use == "in"
    assert inst.parameter[4].documentation == (
        "If 'date' is supplied return only ids that have a validity "
        "period that includes that date."
    )
    assert inst.parameter[4].max == "1"
    assert inst.parameter[4].min == 0
    assert inst.parameter[4].name == "date"
    assert inst.parameter[4].type == "dateTime"
    assert inst.parameter[4].use == "in"
    assert (
        inst.parameter[5].documentation
        == "True if the identifier could be translated successfully."
    )
    assert inst.parameter[5].max == "1"
    assert inst.parameter[5].min == 1
    assert inst.parameter[5].name == "result"
    assert inst.parameter[5].type == "boolean"
    assert inst.parameter[5].use == "out"
    assert (
        inst.parameter[6].documentation
        == "The target identifer(s) of the requested type"
    )
    assert inst.parameter[6].max == "*"
    assert inst.parameter[6].min == 0
    assert inst.parameter[6].name == "targetIdentifier"
    assert inst.parameter[6].type == "boolean"
    assert inst.parameter[6].use == "out"
    assert (
        inst.parameter[7].documentation == "Whether the target identifier is preferred."
    )
    assert inst.parameter[7].max == "1"
    assert inst.parameter[7].min == 0
    assert inst.parameter[7].name == "targetIdentifer.preferred"
    assert inst.parameter[7].type == "boolean"
    assert inst.parameter[7].use == "out"
    assert (
        inst.parameter[8].documentation
        == "The perioid when the target identifier is valid."
    )
    assert inst.parameter[8].max == "1"
    assert inst.parameter[8].min == 0
    assert inst.parameter[8].name == "targetIdentifier.period"
    assert inst.parameter[8].type == "boolean"
    assert inst.parameter[8].use == "out"
    assert inst.publisher == "HL7 (FHIR Project)"
    assert inst.resource[0] == "NamingSystem"
    assert inst.status == "draft"
    assert inst.system is False
    assert inst.text.status == "extensions"
    assert inst.title == "Translate id"
    assert inst.type is True
    assert inst.url == (
        "http://hl7.org/fhir/OperationDefinition/NamingSystem-" "translate-id"
    )
    assert inst.version == "4.5.0"
def test_operationdefinition_7(base_settings):
    """No. 7 tests collection for OperationDefinition.
    Test File: operation-namingsystem-translate-id.json
    """
    json_path = (
        base_settings["unittest_data_dir"] / "operation-namingsystem-translate-id.json"
    )
    parsed = operationdefinition.OperationDefinition.parse_file(
        json_path, content_type="application/json", encoding="utf-8"
    )
    assert parsed.resource_type == "OperationDefinition"
    impl_operationdefinition_7(parsed)
    # Round-trip check: serialize to a dict, rebuild, and re-assert everything.
    payload = parsed.dict()
    assert payload["resourceType"] == "OperationDefinition"
    rebuilt = operationdefinition.OperationDefinition(**payload)
    impl_operationdefinition_7(rebuilt)
def impl_operationdefinition_8(inst):
    """Assert every expected field value of the CodeSystem-subsumes
    OperationDefinition example (operation-codesystem-subsumes.json).

    Called twice by test_operationdefinition_8: once on the instance parsed
    from the example file and once on an instance rebuilt from its dict form.
    """
    assert inst.affectsState is False
    assert inst.code == "subsumes"
    assert inst.contact[0].telecom[0].system == "url"
    assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
    assert inst.contact[0].telecom[1].system == "email"
    assert inst.contact[0].telecom[1].value == "fhir@lists.hl7.org"
    assert inst.date == fhirtypes.DateTime.validate("2021-04-03T00:34:11+00:00")
    assert inst.extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "fmm"
    )
    assert inst.extension[0].valueInteger == 5
    assert inst.extension[1].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-"
        "standards-status"
    )
    assert inst.extension[1].valueCode == "normative"
    assert inst.id == "CodeSystem-subsumes"
    assert inst.instance is True
    assert inst.kind == "operation"
    assert inst.name == "subsumes"
    assert inst.parameter[0].documentation == (
        'The "A" code that is to be tested. If a code is provided, '
        "a system must be provided"
    )
    assert inst.parameter[0].max == "1"
    assert inst.parameter[0].min == 0
    assert inst.parameter[0].name == "codeA"
    assert inst.parameter[0].type == "code"
    assert inst.parameter[0].use == "in"
    assert inst.parameter[1].documentation == (
        'The "B" code that is to be tested. If a code is provided, '
        "a system must be provided"
    )
    assert inst.parameter[1].max == "1"
    assert inst.parameter[1].min == 0
    assert inst.parameter[1].name == "codeB"
    assert inst.parameter[1].type == "code"
    assert inst.parameter[1].use == "in"
    assert inst.parameter[2].documentation == (
        "The code system in which subsumption testing is to be "
        "performed. This must be provided unless the operation is "
        "invoked on a code system instance"
    )
    assert inst.parameter[2].max == "1"
    assert inst.parameter[2].min == 0
    assert inst.parameter[2].name == "system"
    assert inst.parameter[2].type == "uri"
    assert inst.parameter[2].use == "in"
    assert inst.parameter[3].documentation == (
        "The version of the code system, if one was provided in the " "source data"
    )
    assert inst.parameter[3].max == "1"
    assert inst.parameter[3].min == 0
    assert inst.parameter[3].name == "version"
    assert inst.parameter[3].type == "string"
    assert inst.parameter[3].use == "in"
    assert inst.parameter[4].documentation == (
        'The "A" Coding that is to be tested. The code system does '
        "not have to match the specified subsumption code system, but"
        " the relationships between the code systems must be well "
        "established"
    )
    assert inst.parameter[4].max == "1"
    assert inst.parameter[4].min == 0
    assert inst.parameter[4].name == "codingA"
    assert inst.parameter[4].type == "Coding"
    assert inst.parameter[4].use == "in"
    assert inst.parameter[5].documentation == (
        'The "B" Coding that is to be tested. The code system does '
        "not have to match the specified subsumption code system, but"
        " the relationships between the code systems must be well "
        "established"
    )
    assert inst.parameter[5].max == "1"
    assert inst.parameter[5].min == 0
    assert inst.parameter[5].name == "codingB"
    assert inst.parameter[5].type == "Coding"
    assert inst.parameter[5].use == "in"
    assert inst.parameter[6].binding.extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/elementdefinition-" "bindingName"
    )
    assert (
        inst.parameter[6].binding.extension[0].valueString
        == "ConceptSubsumptionOutcome"
    )
    assert inst.parameter[6].binding.strength == "required"
    assert inst.parameter[6].binding.valueSet == (
        "http://hl7.org/fhir/ValueSet/concept-subsumption-" "outcome|4.5.0|4.5.0"
    )
    assert inst.parameter[6].max == "1"
    assert inst.parameter[6].min == 1
    assert inst.parameter[6].name == "outcome"
    assert inst.parameter[6].type == "code"
    assert inst.parameter[6].use == "out"
    assert inst.publisher == "HL7 (FHIR Project)"
    assert inst.resource[0] == "CodeSystem"
    assert inst.status == "active"
    assert inst.system is False
    assert inst.text.status == "extensions"
    assert inst.title == "Subsumption Testing"
    assert inst.type is True
    assert inst.url == "http://hl7.org/fhir/OperationDefinition/CodeSystem-subsumes"
    assert inst.version == "4.5.0"
def test_operationdefinition_8(base_settings):
    """No. 8 tests collection for OperationDefinition.
    Test File: operation-codesystem-subsumes.json
    """
    json_path = base_settings["unittest_data_dir"] / "operation-codesystem-subsumes.json"
    parsed = operationdefinition.OperationDefinition.parse_file(
        json_path, content_type="application/json", encoding="utf-8"
    )
    assert parsed.resource_type == "OperationDefinition"
    impl_operationdefinition_8(parsed)
    # Round-trip check: serialize to a dict, rebuild, and re-assert everything.
    payload = parsed.dict()
    assert payload["resourceType"] == "OperationDefinition"
    rebuilt = operationdefinition.OperationDefinition(**payload)
    impl_operationdefinition_8(rebuilt)
def impl_operationdefinition_9(inst):
    """Assert every expected field value of the Library-data-requirements
    OperationDefinition example (operation-library-data-requirements.json).

    Called twice by test_operationdefinition_9: once on the instance parsed
    from the example file and once on an instance rebuilt from its dict form.
    """
    assert inst.affectsState is False
    assert inst.code == "data-requirements"
    assert inst.contact[0].telecom[0].system == "url"
    assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
    assert inst.contact[0].telecom[1].system == "email"
    assert inst.contact[0].telecom[1].value == "fhir@lists.hl7.org"
    assert inst.date == fhirtypes.DateTime.validate("2021-04-03T00:34:11+00:00")
    assert inst.description == (
        "The data-requirements operation aggregates and returns the "
        "parameters and data requirements for a resource and all its "
        "dependencies as a single module definition"
    )
    assert inst.extension[0].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-" "fmm"
    )
    assert inst.extension[0].valueInteger == 3
    assert inst.extension[1].url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-"
        "standards-status"
    )
    assert inst.extension[1].valueCode == "trial-use"
    assert inst.id == "Library-data-requirements"
    assert inst.instance is True
    assert inst.kind == "operation"
    assert inst.name == "data-requirements"
    assert (
        inst.parameter[0].documentation
        == "The target of the data requirements operation"
    )
    assert inst.parameter[0].max == "1"
    assert inst.parameter[0].min == 0
    assert inst.parameter[0].name == "target"
    assert inst.parameter[0].type == "string"
    assert inst.parameter[0].use == "in"
    assert inst.parameter[1].documentation == "The result of the requirements gathering"
    assert inst.parameter[1].max == "1"
    assert inst.parameter[1].min == 1
    assert inst.parameter[1].name == "return"
    assert inst.parameter[1].type == "Library"
    assert inst.parameter[1].use == "out"
    assert inst.publisher == "HL7 (FHIR Project)"
    assert inst.resource[0] == "Library"
    assert inst.status == "draft"
    assert inst.system is True
    assert inst.text.status == "extensions"
    assert inst.title == "Data Requirements"
    assert inst.type is False
    assert inst.url == (
        "http://hl7.org/fhir/OperationDefinition/Library-data-" "requirements"
    )
    assert inst.version == "4.5.0"
def test_operationdefinition_9(base_settings):
    """No. 9 tests collection for OperationDefinition.
    Test File: operation-library-data-requirements.json
    """
    data_file = (
        base_settings["unittest_data_dir"] / "operation-library-data-requirements.json"
    )
    parsed = operationdefinition.OperationDefinition.parse_file(
        data_file, content_type="application/json", encoding="utf-8"
    )
    assert parsed.resource_type == "OperationDefinition"
    impl_operationdefinition_9(parsed)
    # Round-trip: serialize to a dict, rebuild the model, re-check every field.
    payload = parsed.dict()
    assert payload["resourceType"] == "OperationDefinition"
    rebuilt = operationdefinition.OperationDefinition(**payload)
    impl_operationdefinition_9(rebuilt)
def impl_operationdefinition_10(inst):
    """Field-by-field checks for the MessageHeader process-message example."""
    assert inst.affectsState is True
    assert inst.code == "process-message"
    telecom = inst.contact[0].telecom
    assert telecom[0].system == "url"
    assert telecom[0].value == "http://hl7.org/fhir"
    assert telecom[1].system == "email"
    assert telecom[1].value == "fhir@lists.hl7.org"
    assert inst.date == fhirtypes.DateTime.validate("2021-04-03T00:34:11+00:00")
    fmm_ext, status_ext = inst.extension[0], inst.extension[1]
    assert fmm_ext.url == "http://hl7.org/fhir/StructureDefinition/structuredefinition-fmm"
    assert fmm_ext.valueInteger == 4
    assert status_ext.url == (
        "http://hl7.org/fhir/StructureDefinition/structuredefinition-standards-status"
    )
    assert status_ext.valueCode == "trial-use"
    assert inst.id == "MessageHeader-process-message"
    assert inst.instance is False
    assert inst.kind == "operation"
    assert inst.name == "process-message"
    # One row per operation parameter: (documentation, max, min, name, type, use).
    expected_parameters = [
        (
            "The message to process (or, if using asynchronous messaging,"
            " it may be a response message to accept)",
            "1", 1, "content", "Bundle", "in",
        ),
        (
            "If 'true' the message is processed using the asynchronous "
            "messaging pattern",
            "1", 0, "async", "boolean", "in",
        ),
        (
            "A URL to submit response messages to, if asynchronous "
            "messaging is being used, and if the "
            "MessageHeader.source.endpoint is not the appropriate place "
            "to submit responses",
            "1", 0, "response-url", "url", "in",
        ),
        (
            "A response message, if synchronous messaging is being used "
            "(mandatory in this case). For asynchronous messaging, there "
            "is no return value",
            "1", 0, "return", "Bundle", "out",
        ),
    ]
    for param, (doc, max_, min_, name, type_, use) in zip(
        inst.parameter, expected_parameters
    ):
        assert param.documentation == doc
        assert param.max == max_
        assert param.min == min_
        assert param.name == name
        assert param.type == type_
        assert param.use == use
    assert inst.publisher == "HL7 (FHIR Project)"
    assert inst.resource[0] == "MessageHeader"
    assert inst.status == "draft"
    assert inst.system is True
    assert inst.text.status == "extensions"
    assert inst.title == "Process Message"
    assert inst.type is False
    assert inst.url == (
        "http://hl7.org/fhir/OperationDefinition/MessageHeader-process-message"
    )
    assert inst.version == "4.5.0"
def test_operationdefinition_10(base_settings):
    """No. 10 tests collection for OperationDefinition.
    Test File: operation-messageheader-process-message.json
    """
    data_file = (
        base_settings["unittest_data_dir"]
        / "operation-messageheader-process-message.json"
    )
    parsed = operationdefinition.OperationDefinition.parse_file(
        data_file, content_type="application/json", encoding="utf-8"
    )
    assert parsed.resource_type == "OperationDefinition"
    impl_operationdefinition_10(parsed)
    # Round-trip: serialize to a dict, rebuild the model, re-check every field.
    payload = parsed.dict()
    assert payload["resourceType"] == "OperationDefinition"
    rebuilt = operationdefinition.OperationDefinition(**payload)
    impl_operationdefinition_10(rebuilt)
| 40.820327
| 88
| 0.66926
| 5,534
| 44,984
| 5.412902
| 0.065775
| 0.198965
| 0.223903
| 0.046069
| 0.864831
| 0.798832
| 0.786647
| 0.768286
| 0.747555
| 0.717376
| 0
| 0.029299
| 0.19498
| 44,984
| 1,101
| 89
| 40.857402
| 0.79789
| 0.041081
| 0
| 0.590031
| 0
| 0
| 0.282274
| 0.028737
| 0
| 0
| 0
| 0
| 0.626653
| 1
| 0.020346
| false
| 0
| 0.003052
| 0
| 0.023398
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
91ba44d608031eee5c846fd2319e7789d9a0ca84
| 96,016
|
py
|
Python
|
sdk/python/pulumi_aws/lb/_inputs.py
|
Otanikotani/pulumi-aws
|
00e2b352da42c5b1b0ec7b4760eec5ad2b23ff21
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/lb/_inputs.py
|
Otanikotani/pulumi-aws
|
00e2b352da42c5b1b0ec7b4760eec5ad2b23ff21
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/lb/_inputs.py
|
Otanikotani/pulumi-aws
|
00e2b352da42c5b1b0ec7b4760eec5ad2b23ff21
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
# Public API of this module: the input-type argument classes for the LB
# listener/listener-rule/load-balancer/target-group resources defined here.
__all__ = [
    'ListenerDefaultActionArgs',
    'ListenerDefaultActionAuthenticateCognitoArgs',
    'ListenerDefaultActionAuthenticateOidcArgs',
    'ListenerDefaultActionFixedResponseArgs',
    'ListenerDefaultActionForwardArgs',
    'ListenerDefaultActionForwardStickinessArgs',
    'ListenerDefaultActionForwardTargetGroupArgs',
    'ListenerDefaultActionRedirectArgs',
    'ListenerRuleActionArgs',
    'ListenerRuleActionAuthenticateCognitoArgs',
    'ListenerRuleActionAuthenticateOidcArgs',
    'ListenerRuleActionFixedResponseArgs',
    'ListenerRuleActionForwardArgs',
    'ListenerRuleActionForwardStickinessArgs',
    'ListenerRuleActionForwardTargetGroupArgs',
    'ListenerRuleActionRedirectArgs',
    'ListenerRuleConditionArgs',
    'ListenerRuleConditionHostHeaderArgs',
    'ListenerRuleConditionHttpHeaderArgs',
    'ListenerRuleConditionHttpRequestMethodArgs',
    'ListenerRuleConditionPathPatternArgs',
    'ListenerRuleConditionQueryStringArgs',
    'ListenerRuleConditionSourceIpArgs',
    'LoadBalancerAccessLogsArgs',
    'LoadBalancerSubnetMappingArgs',
    'TargetGroupHealthCheckArgs',
    'TargetGroupStickinessArgs',
]
@pulumi.input_type
class ListenerDefaultActionArgs:
    """Arguments for the `default_action` block of an LB Listener.

    NOTE: generated code — `@pulumi.input_type` introspects the attribute
    getters/setters below; do not restructure by hand.
    """
    def __init__(__self__, *,
                 type: pulumi.Input[str],
                 authenticate_cognito: Optional[pulumi.Input['ListenerDefaultActionAuthenticateCognitoArgs']] = None,
                 authenticate_oidc: Optional[pulumi.Input['ListenerDefaultActionAuthenticateOidcArgs']] = None,
                 fixed_response: Optional[pulumi.Input['ListenerDefaultActionFixedResponseArgs']] = None,
                 forward: Optional[pulumi.Input['ListenerDefaultActionForwardArgs']] = None,
                 order: Optional[pulumi.Input[int]] = None,
                 redirect: Optional[pulumi.Input['ListenerDefaultActionRedirectArgs']] = None,
                 target_group_arn: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] type: The type of routing action. Valid values are `forward`, `redirect`, `fixed-response`, `authenticate-cognito` and `authenticate-oidc`.
        :param pulumi.Input['ListenerDefaultActionAuthenticateCognitoArgs'] authenticate_cognito: Information for creating an authenticate action using Cognito. Required if `type` is `authenticate-cognito`.
        :param pulumi.Input['ListenerDefaultActionAuthenticateOidcArgs'] authenticate_oidc: Information for creating an authenticate action using OIDC. Required if `type` is `authenticate-oidc`.
        :param pulumi.Input['ListenerDefaultActionFixedResponseArgs'] fixed_response: Information for creating an action that returns a custom HTTP response. Required if `type` is `fixed-response`.
        :param pulumi.Input['ListenerDefaultActionForwardArgs'] forward: Information for creating an action that distributes requests among one or more target groups. Specify only if `type` is `forward`. If you specify both `forward` block and `target_group_arn` attribute, you can specify only one target group using `forward` and it must be the same target group specified in `target_group_arn`.
        :param pulumi.Input['ListenerDefaultActionRedirectArgs'] redirect: Information for creating a redirect action. Required if `type` is `redirect`.
        :param pulumi.Input[str] target_group_arn: The ARN of the Target Group to which to route traffic. Specify only if `type` is `forward` and you want to route to a single target group. To route to one or more target groups, use a `forward` block instead.
        """
        pulumi.set(__self__, "type", type)
        if authenticate_cognito is not None:
            pulumi.set(__self__, "authenticate_cognito", authenticate_cognito)
        if authenticate_oidc is not None:
            pulumi.set(__self__, "authenticate_oidc", authenticate_oidc)
        if fixed_response is not None:
            pulumi.set(__self__, "fixed_response", fixed_response)
        if forward is not None:
            pulumi.set(__self__, "forward", forward)
        if order is not None:
            pulumi.set(__self__, "order", order)
        if redirect is not None:
            pulumi.set(__self__, "redirect", redirect)
        if target_group_arn is not None:
            pulumi.set(__self__, "target_group_arn", target_group_arn)
    @property
    @pulumi.getter
    def type(self) -> pulumi.Input[str]:
        """
        The type of routing action. Valid values are `forward`, `redirect`, `fixed-response`, `authenticate-cognito` and `authenticate-oidc`.
        """
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: pulumi.Input[str]):
        pulumi.set(self, "type", value)
    @property
    @pulumi.getter(name="authenticateCognito")
    def authenticate_cognito(self) -> Optional[pulumi.Input['ListenerDefaultActionAuthenticateCognitoArgs']]:
        """
        Information for creating an authenticate action using Cognito. Required if `type` is `authenticate-cognito`.
        """
        return pulumi.get(self, "authenticate_cognito")
    @authenticate_cognito.setter
    def authenticate_cognito(self, value: Optional[pulumi.Input['ListenerDefaultActionAuthenticateCognitoArgs']]):
        pulumi.set(self, "authenticate_cognito", value)
    @property
    @pulumi.getter(name="authenticateOidc")
    def authenticate_oidc(self) -> Optional[pulumi.Input['ListenerDefaultActionAuthenticateOidcArgs']]:
        """
        Information for creating an authenticate action using OIDC. Required if `type` is `authenticate-oidc`.
        """
        return pulumi.get(self, "authenticate_oidc")
    @authenticate_oidc.setter
    def authenticate_oidc(self, value: Optional[pulumi.Input['ListenerDefaultActionAuthenticateOidcArgs']]):
        pulumi.set(self, "authenticate_oidc", value)
    @property
    @pulumi.getter(name="fixedResponse")
    def fixed_response(self) -> Optional[pulumi.Input['ListenerDefaultActionFixedResponseArgs']]:
        """
        Information for creating an action that returns a custom HTTP response. Required if `type` is `fixed-response`.
        """
        return pulumi.get(self, "fixed_response")
    @fixed_response.setter
    def fixed_response(self, value: Optional[pulumi.Input['ListenerDefaultActionFixedResponseArgs']]):
        pulumi.set(self, "fixed_response", value)
    @property
    @pulumi.getter
    def forward(self) -> Optional[pulumi.Input['ListenerDefaultActionForwardArgs']]:
        """
        Information for creating an action that distributes requests among one or more target groups. Specify only if `type` is `forward`. If you specify both `forward` block and `target_group_arn` attribute, you can specify only one target group using `forward` and it must be the same target group specified in `target_group_arn`.
        """
        return pulumi.get(self, "forward")
    @forward.setter
    def forward(self, value: Optional[pulumi.Input['ListenerDefaultActionForwardArgs']]):
        pulumi.set(self, "forward", value)
    @property
    @pulumi.getter
    def order(self) -> Optional[pulumi.Input[int]]:
        # NOTE(review): `order` is undocumented by the generator here; it is the
        # action's evaluation order — confirm semantics against the AWS provider docs.
        return pulumi.get(self, "order")
    @order.setter
    def order(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "order", value)
    @property
    @pulumi.getter
    def redirect(self) -> Optional[pulumi.Input['ListenerDefaultActionRedirectArgs']]:
        """
        Information for creating a redirect action. Required if `type` is `redirect`.
        """
        return pulumi.get(self, "redirect")
    @redirect.setter
    def redirect(self, value: Optional[pulumi.Input['ListenerDefaultActionRedirectArgs']]):
        pulumi.set(self, "redirect", value)
    @property
    @pulumi.getter(name="targetGroupArn")
    def target_group_arn(self) -> Optional[pulumi.Input[str]]:
        """
        The ARN of the Target Group to which to route traffic. Specify only if `type` is `forward` and you want to route to a single target group. To route to one or more target groups, use a `forward` block instead.
        """
        return pulumi.get(self, "target_group_arn")
    @target_group_arn.setter
    def target_group_arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "target_group_arn", value)
@pulumi.input_type
class ListenerDefaultActionAuthenticateCognitoArgs:
    """Cognito authentication settings for a listener default action of type
    `authenticate-cognito` (generated code — do not restructure by hand)."""
    def __init__(__self__, *,
                 user_pool_arn: pulumi.Input[str],
                 user_pool_client_id: pulumi.Input[str],
                 user_pool_domain: pulumi.Input[str],
                 authentication_request_extra_params: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 on_unauthenticated_request: Optional[pulumi.Input[str]] = None,
                 scope: Optional[pulumi.Input[str]] = None,
                 session_cookie_name: Optional[pulumi.Input[str]] = None,
                 session_timeout: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[str] user_pool_arn: The ARN of the Cognito user pool.
        :param pulumi.Input[str] user_pool_client_id: The ID of the Cognito user pool client.
        :param pulumi.Input[str] user_pool_domain: The domain prefix or fully-qualified domain name of the Cognito user pool.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] authentication_request_extra_params: The query parameters to include in the redirect request to the authorization endpoint. Max: 10.
        :param pulumi.Input[str] on_unauthenticated_request: The behavior if the user is not authenticated. Valid values: `deny`, `allow` and `authenticate`
        :param pulumi.Input[str] scope: The set of user claims to be requested from the IdP.
        :param pulumi.Input[str] session_cookie_name: The name of the cookie used to maintain session information.
        :param pulumi.Input[int] session_timeout: The maximum duration of the authentication session, in seconds.
        """
        pulumi.set(__self__, "user_pool_arn", user_pool_arn)
        pulumi.set(__self__, "user_pool_client_id", user_pool_client_id)
        pulumi.set(__self__, "user_pool_domain", user_pool_domain)
        if authentication_request_extra_params is not None:
            pulumi.set(__self__, "authentication_request_extra_params", authentication_request_extra_params)
        if on_unauthenticated_request is not None:
            pulumi.set(__self__, "on_unauthenticated_request", on_unauthenticated_request)
        if scope is not None:
            pulumi.set(__self__, "scope", scope)
        if session_cookie_name is not None:
            pulumi.set(__self__, "session_cookie_name", session_cookie_name)
        if session_timeout is not None:
            pulumi.set(__self__, "session_timeout", session_timeout)
    @property
    @pulumi.getter(name="userPoolArn")
    def user_pool_arn(self) -> pulumi.Input[str]:
        """
        The ARN of the Cognito user pool.
        """
        return pulumi.get(self, "user_pool_arn")
    @user_pool_arn.setter
    def user_pool_arn(self, value: pulumi.Input[str]):
        pulumi.set(self, "user_pool_arn", value)
    @property
    @pulumi.getter(name="userPoolClientId")
    def user_pool_client_id(self) -> pulumi.Input[str]:
        """
        The ID of the Cognito user pool client.
        """
        return pulumi.get(self, "user_pool_client_id")
    @user_pool_client_id.setter
    def user_pool_client_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "user_pool_client_id", value)
    @property
    @pulumi.getter(name="userPoolDomain")
    def user_pool_domain(self) -> pulumi.Input[str]:
        """
        The domain prefix or fully-qualified domain name of the Cognito user pool.
        """
        return pulumi.get(self, "user_pool_domain")
    @user_pool_domain.setter
    def user_pool_domain(self, value: pulumi.Input[str]):
        pulumi.set(self, "user_pool_domain", value)
    @property
    @pulumi.getter(name="authenticationRequestExtraParams")
    def authentication_request_extra_params(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        The query parameters to include in the redirect request to the authorization endpoint. Max: 10.
        """
        return pulumi.get(self, "authentication_request_extra_params")
    @authentication_request_extra_params.setter
    def authentication_request_extra_params(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "authentication_request_extra_params", value)
    @property
    @pulumi.getter(name="onUnauthenticatedRequest")
    def on_unauthenticated_request(self) -> Optional[pulumi.Input[str]]:
        """
        The behavior if the user is not authenticated. Valid values: `deny`, `allow` and `authenticate`
        """
        return pulumi.get(self, "on_unauthenticated_request")
    @on_unauthenticated_request.setter
    def on_unauthenticated_request(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "on_unauthenticated_request", value)
    @property
    @pulumi.getter
    def scope(self) -> Optional[pulumi.Input[str]]:
        """
        The set of user claims to be requested from the IdP.
        """
        return pulumi.get(self, "scope")
    @scope.setter
    def scope(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "scope", value)
    @property
    @pulumi.getter(name="sessionCookieName")
    def session_cookie_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the cookie used to maintain session information.
        """
        return pulumi.get(self, "session_cookie_name")
    @session_cookie_name.setter
    def session_cookie_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "session_cookie_name", value)
    @property
    @pulumi.getter(name="sessionTimeout")
    def session_timeout(self) -> Optional[pulumi.Input[int]]:
        """
        The maximum duration of the authentication session, in seconds.
        """
        return pulumi.get(self, "session_timeout")
    @session_timeout.setter
    def session_timeout(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "session_timeout", value)
@pulumi.input_type
class ListenerDefaultActionAuthenticateOidcArgs:
    """OIDC authentication settings for a listener default action of type
    `authenticate-oidc` (generated code — do not restructure by hand)."""
    def __init__(__self__, *,
                 authorization_endpoint: pulumi.Input[str],
                 client_id: pulumi.Input[str],
                 client_secret: pulumi.Input[str],
                 issuer: pulumi.Input[str],
                 token_endpoint: pulumi.Input[str],
                 user_info_endpoint: pulumi.Input[str],
                 authentication_request_extra_params: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 on_unauthenticated_request: Optional[pulumi.Input[str]] = None,
                 scope: Optional[pulumi.Input[str]] = None,
                 session_cookie_name: Optional[pulumi.Input[str]] = None,
                 session_timeout: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[str] authorization_endpoint: The authorization endpoint of the IdP.
        :param pulumi.Input[str] client_id: The OAuth 2.0 client identifier.
        :param pulumi.Input[str] client_secret: The OAuth 2.0 client secret.
        :param pulumi.Input[str] issuer: The OIDC issuer identifier of the IdP.
        :param pulumi.Input[str] token_endpoint: The token endpoint of the IdP.
        :param pulumi.Input[str] user_info_endpoint: The user info endpoint of the IdP.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] authentication_request_extra_params: The query parameters to include in the redirect request to the authorization endpoint. Max: 10.
        :param pulumi.Input[str] on_unauthenticated_request: The behavior if the user is not authenticated. Valid values: `deny`, `allow` and `authenticate`
        :param pulumi.Input[str] scope: The set of user claims to be requested from the IdP.
        :param pulumi.Input[str] session_cookie_name: The name of the cookie used to maintain session information.
        :param pulumi.Input[int] session_timeout: The maximum duration of the authentication session, in seconds.
        """
        pulumi.set(__self__, "authorization_endpoint", authorization_endpoint)
        pulumi.set(__self__, "client_id", client_id)
        pulumi.set(__self__, "client_secret", client_secret)
        pulumi.set(__self__, "issuer", issuer)
        pulumi.set(__self__, "token_endpoint", token_endpoint)
        pulumi.set(__self__, "user_info_endpoint", user_info_endpoint)
        if authentication_request_extra_params is not None:
            pulumi.set(__self__, "authentication_request_extra_params", authentication_request_extra_params)
        if on_unauthenticated_request is not None:
            pulumi.set(__self__, "on_unauthenticated_request", on_unauthenticated_request)
        if scope is not None:
            pulumi.set(__self__, "scope", scope)
        if session_cookie_name is not None:
            pulumi.set(__self__, "session_cookie_name", session_cookie_name)
        if session_timeout is not None:
            pulumi.set(__self__, "session_timeout", session_timeout)
    @property
    @pulumi.getter(name="authorizationEndpoint")
    def authorization_endpoint(self) -> pulumi.Input[str]:
        """
        The authorization endpoint of the IdP.
        """
        return pulumi.get(self, "authorization_endpoint")
    @authorization_endpoint.setter
    def authorization_endpoint(self, value: pulumi.Input[str]):
        pulumi.set(self, "authorization_endpoint", value)
    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> pulumi.Input[str]:
        """
        The OAuth 2.0 client identifier.
        """
        return pulumi.get(self, "client_id")
    @client_id.setter
    def client_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "client_id", value)
    @property
    @pulumi.getter(name="clientSecret")
    def client_secret(self) -> pulumi.Input[str]:
        """
        The OAuth 2.0 client secret.
        """
        return pulumi.get(self, "client_secret")
    @client_secret.setter
    def client_secret(self, value: pulumi.Input[str]):
        pulumi.set(self, "client_secret", value)
    @property
    @pulumi.getter
    def issuer(self) -> pulumi.Input[str]:
        """
        The OIDC issuer identifier of the IdP.
        """
        return pulumi.get(self, "issuer")
    @issuer.setter
    def issuer(self, value: pulumi.Input[str]):
        pulumi.set(self, "issuer", value)
    @property
    @pulumi.getter(name="tokenEndpoint")
    def token_endpoint(self) -> pulumi.Input[str]:
        """
        The token endpoint of the IdP.
        """
        return pulumi.get(self, "token_endpoint")
    @token_endpoint.setter
    def token_endpoint(self, value: pulumi.Input[str]):
        pulumi.set(self, "token_endpoint", value)
    @property
    @pulumi.getter(name="userInfoEndpoint")
    def user_info_endpoint(self) -> pulumi.Input[str]:
        """
        The user info endpoint of the IdP.
        """
        return pulumi.get(self, "user_info_endpoint")
    @user_info_endpoint.setter
    def user_info_endpoint(self, value: pulumi.Input[str]):
        pulumi.set(self, "user_info_endpoint", value)
    @property
    @pulumi.getter(name="authenticationRequestExtraParams")
    def authentication_request_extra_params(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        The query parameters to include in the redirect request to the authorization endpoint. Max: 10.
        """
        return pulumi.get(self, "authentication_request_extra_params")
    @authentication_request_extra_params.setter
    def authentication_request_extra_params(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "authentication_request_extra_params", value)
    @property
    @pulumi.getter(name="onUnauthenticatedRequest")
    def on_unauthenticated_request(self) -> Optional[pulumi.Input[str]]:
        """
        The behavior if the user is not authenticated. Valid values: `deny`, `allow` and `authenticate`
        """
        return pulumi.get(self, "on_unauthenticated_request")
    @on_unauthenticated_request.setter
    def on_unauthenticated_request(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "on_unauthenticated_request", value)
    @property
    @pulumi.getter
    def scope(self) -> Optional[pulumi.Input[str]]:
        """
        The set of user claims to be requested from the IdP.
        """
        return pulumi.get(self, "scope")
    @scope.setter
    def scope(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "scope", value)
    @property
    @pulumi.getter(name="sessionCookieName")
    def session_cookie_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the cookie used to maintain session information.
        """
        return pulumi.get(self, "session_cookie_name")
    @session_cookie_name.setter
    def session_cookie_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "session_cookie_name", value)
    @property
    @pulumi.getter(name="sessionTimeout")
    def session_timeout(self) -> Optional[pulumi.Input[int]]:
        """
        The maximum duration of the authentication session, in seconds.
        """
        return pulumi.get(self, "session_timeout")
    @session_timeout.setter
    def session_timeout(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "session_timeout", value)
@pulumi.input_type
class ListenerDefaultActionFixedResponseArgs:
    """Fixed-response settings for a listener default action of type
    `fixed-response` (generated code — do not restructure by hand)."""
    def __init__(__self__, *,
                 content_type: pulumi.Input[str],
                 message_body: Optional[pulumi.Input[str]] = None,
                 status_code: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] content_type: The content type. Valid values are `text/plain`, `text/css`, `text/html`, `application/javascript` and `application/json`.
        :param pulumi.Input[str] message_body: The message body.
        :param pulumi.Input[str] status_code: The HTTP response code. Valid values are `2XX`, `4XX`, or `5XX`.
        """
        pulumi.set(__self__, "content_type", content_type)
        if message_body is not None:
            pulumi.set(__self__, "message_body", message_body)
        if status_code is not None:
            pulumi.set(__self__, "status_code", status_code)
    @property
    @pulumi.getter(name="contentType")
    def content_type(self) -> pulumi.Input[str]:
        """
        The content type. Valid values are `text/plain`, `text/css`, `text/html`, `application/javascript` and `application/json`.
        """
        return pulumi.get(self, "content_type")
    @content_type.setter
    def content_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "content_type", value)
    @property
    @pulumi.getter(name="messageBody")
    def message_body(self) -> Optional[pulumi.Input[str]]:
        """
        The message body.
        """
        return pulumi.get(self, "message_body")
    @message_body.setter
    def message_body(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "message_body", value)
    @property
    @pulumi.getter(name="statusCode")
    def status_code(self) -> Optional[pulumi.Input[str]]:
        """
        The HTTP response code. Valid values are `2XX`, `4XX`, or `5XX`.
        """
        return pulumi.get(self, "status_code")
    @status_code.setter
    def status_code(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "status_code", value)
@pulumi.input_type
class ListenerDefaultActionForwardArgs:
    """Forward settings (target groups plus optional stickiness) for a listener
    default action of type `forward` (generated code — do not restructure by hand)."""
    def __init__(__self__, *,
                 target_groups: pulumi.Input[Sequence[pulumi.Input['ListenerDefaultActionForwardTargetGroupArgs']]],
                 stickiness: Optional[pulumi.Input['ListenerDefaultActionForwardStickinessArgs']] = None):
        """
        :param pulumi.Input[Sequence[pulumi.Input['ListenerDefaultActionForwardTargetGroupArgs']]] target_groups: One or more target groups block.
        :param pulumi.Input['ListenerDefaultActionForwardStickinessArgs'] stickiness: The target group stickiness for the rule.
        """
        pulumi.set(__self__, "target_groups", target_groups)
        if stickiness is not None:
            pulumi.set(__self__, "stickiness", stickiness)
    @property
    @pulumi.getter(name="targetGroups")
    def target_groups(self) -> pulumi.Input[Sequence[pulumi.Input['ListenerDefaultActionForwardTargetGroupArgs']]]:
        """
        One or more target groups block.
        """
        return pulumi.get(self, "target_groups")
    @target_groups.setter
    def target_groups(self, value: pulumi.Input[Sequence[pulumi.Input['ListenerDefaultActionForwardTargetGroupArgs']]]):
        pulumi.set(self, "target_groups", value)
    @property
    @pulumi.getter
    def stickiness(self) -> Optional[pulumi.Input['ListenerDefaultActionForwardStickinessArgs']]:
        """
        The target group stickiness for the rule.
        """
        return pulumi.get(self, "stickiness")
    @stickiness.setter
    def stickiness(self, value: Optional[pulumi.Input['ListenerDefaultActionForwardStickinessArgs']]):
        pulumi.set(self, "stickiness", value)
@pulumi.input_type
class ListenerDefaultActionForwardStickinessArgs:
    """Target-group stickiness configuration inside a `forward` default action
    (generated code — do not restructure by hand)."""
    def __init__(__self__, *,
                 duration: pulumi.Input[int],
                 enabled: Optional[pulumi.Input[bool]] = None):
        """
        :param pulumi.Input[int] duration: The time period, in seconds, during which requests from a client should be routed to the same target group. The range is 1-604800 seconds (7 days).
        :param pulumi.Input[bool] enabled: Indicates whether target group stickiness is enabled.
        """
        pulumi.set(__self__, "duration", duration)
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
    @property
    @pulumi.getter
    def duration(self) -> pulumi.Input[int]:
        """
        The time period, in seconds, during which requests from a client should be routed to the same target group. The range is 1-604800 seconds (7 days).
        """
        return pulumi.get(self, "duration")
    @duration.setter
    def duration(self, value: pulumi.Input[int]):
        pulumi.set(self, "duration", value)
    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicates whether target group stickiness is enabled.
        """
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
@pulumi.input_type
class ListenerDefaultActionForwardTargetGroupArgs:
    """A single weighted target-group entry inside a `forward` default action
    (generated code — do not restructure by hand)."""
    def __init__(__self__, *,
                 arn: pulumi.Input[str],
                 weight: Optional[pulumi.Input[int]] = None):
        """
        :param pulumi.Input[str] arn: The Amazon Resource Name (ARN) of the target group.
        :param pulumi.Input[int] weight: The weight. The range is 0 to 999.
        """
        pulumi.set(__self__, "arn", arn)
        if weight is not None:
            pulumi.set(__self__, "weight", weight)
    @property
    @pulumi.getter
    def arn(self) -> pulumi.Input[str]:
        """
        The Amazon Resource Name (ARN) of the target group.
        """
        return pulumi.get(self, "arn")
    @arn.setter
    def arn(self, value: pulumi.Input[str]):
        pulumi.set(self, "arn", value)
    @property
    @pulumi.getter
    def weight(self) -> Optional[pulumi.Input[int]]:
        """
        The weight. The range is 0 to 999.
        """
        return pulumi.get(self, "weight")
    @weight.setter
    def weight(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "weight", value)
@pulumi.input_type
class ListenerDefaultActionRedirectArgs:
def __init__(__self__, *,
status_code: pulumi.Input[str],
host: Optional[pulumi.Input[str]] = None,
path: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input[str]] = None,
query: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] status_code: The HTTP redirect code. The redirect is either permanent (`HTTP_301`) or temporary (`HTTP_302`).
:param pulumi.Input[str] host: The hostname. This component is not percent-encoded. The hostname can contain `#{host}`. Defaults to `#{host}`.
:param pulumi.Input[str] path: The absolute path, starting with the leading "/". This component is not percent-encoded. The path can contain #{host}, #{path}, and #{port}. Defaults to `/#{path}`.
:param pulumi.Input[str] port: The port. Specify a value from `1` to `65535` or `#{port}`. Defaults to `#{port}`.
:param pulumi.Input[str] protocol: The protocol. Valid values are `HTTP`, `HTTPS`, or `#{protocol}`. Defaults to `#{protocol}`.
:param pulumi.Input[str] query: The query parameters, URL-encoded when necessary, but not percent-encoded. Do not include the leading "?". Defaults to `#{query}`.
"""
pulumi.set(__self__, "status_code", status_code)
if host is not None:
pulumi.set(__self__, "host", host)
if path is not None:
pulumi.set(__self__, "path", path)
if port is not None:
pulumi.set(__self__, "port", port)
if protocol is not None:
pulumi.set(__self__, "protocol", protocol)
if query is not None:
pulumi.set(__self__, "query", query)
@property
@pulumi.getter(name="statusCode")
def status_code(self) -> pulumi.Input[str]:
"""
The HTTP redirect code. The redirect is either permanent (`HTTP_301`) or temporary (`HTTP_302`).
"""
return pulumi.get(self, "status_code")
@status_code.setter
def status_code(self, value: pulumi.Input[str]):
pulumi.set(self, "status_code", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
The hostname. This component is not percent-encoded. The hostname can contain `#{host}`. Defaults to `#{host}`.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
The absolute path, starting with the leading "/". This component is not percent-encoded. The path can contain #{host}, #{path}, and #{port}. Defaults to `/#{path}`.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def port(self) -> Optional[pulumi.Input[str]]:
"""
The port. Specify a value from `1` to `65535` or `#{port}`. Defaults to `#{port}`.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "port", value)
    @property
    @pulumi.getter
    def protocol(self) -> Optional[pulumi.Input[str]]:
        """
        The protocol. Valid values are `HTTP`, `HTTPS`, or `#{protocol}`. Defaults to `#{protocol}`.
        """
        return pulumi.get(self, "protocol")

    @protocol.setter
    def protocol(self, value: Optional[pulumi.Input[str]]):
        # Store the redirect protocol; None keeps the `#{protocol}` default.
        pulumi.set(self, "protocol", value)
    @property
    @pulumi.getter
    def query(self) -> Optional[pulumi.Input[str]]:
        """
        The query parameters, URL-encoded when necessary, but not percent-encoded. Do not include the leading "?". Defaults to `#{query}`.
        """
        return pulumi.get(self, "query")

    @query.setter
    def query(self, value: Optional[pulumi.Input[str]]):
        # Store the redirect query string (without the leading "?").
        pulumi.set(self, "query", value)
@pulumi.input_type
class ListenerRuleActionArgs:
    def __init__(__self__, *,
                 type: pulumi.Input[str],
                 authenticate_cognito: Optional[pulumi.Input['ListenerRuleActionAuthenticateCognitoArgs']] = None,
                 authenticate_oidc: Optional[pulumi.Input['ListenerRuleActionAuthenticateOidcArgs']] = None,
                 fixed_response: Optional[pulumi.Input['ListenerRuleActionFixedResponseArgs']] = None,
                 forward: Optional[pulumi.Input['ListenerRuleActionForwardArgs']] = None,
                 order: Optional[pulumi.Input[int]] = None,
                 redirect: Optional[pulumi.Input['ListenerRuleActionRedirectArgs']] = None,
                 target_group_arn: Optional[pulumi.Input[str]] = None):
        """
        Input arguments describing one routing action of a listener rule.

        :param pulumi.Input[str] type: The type of routing action. Valid values are `forward`, `redirect`, `fixed-response`, `authenticate-cognito` and `authenticate-oidc`.
        :param pulumi.Input['ListenerRuleActionAuthenticateCognitoArgs'] authenticate_cognito: Information for creating an authenticate action using Cognito. Required if `type` is `authenticate-cognito`.
        :param pulumi.Input['ListenerRuleActionAuthenticateOidcArgs'] authenticate_oidc: Information for creating an authenticate action using OIDC. Required if `type` is `authenticate-oidc`.
        :param pulumi.Input['ListenerRuleActionFixedResponseArgs'] fixed_response: Information for creating an action that returns a custom HTTP response. Required if `type` is `fixed-response`.
        :param pulumi.Input['ListenerRuleActionForwardArgs'] forward: Information for creating an action that distributes requests among one or more target groups. Specify only if `type` is `forward`. If you specify both `forward` block and `target_group_arn` attribute, you can specify only one target group using `forward` and it must be the same target group specified in `target_group_arn`.
        :param pulumi.Input['ListenerRuleActionRedirectArgs'] redirect: Information for creating a redirect action. Required if `type` is `redirect`.
        :param pulumi.Input[str] target_group_arn: The ARN of the Target Group to which to route traffic. Specify only if `type` is `forward` and you want to route to a single target group. To route to one or more target groups, use a `forward` block instead.
        """
        pulumi.set(__self__, "type", type)
        # Record only those optional arguments that were actually supplied.
        for attr_name, attr_value in (
                ("authenticate_cognito", authenticate_cognito),
                ("authenticate_oidc", authenticate_oidc),
                ("fixed_response", fixed_response),
                ("forward", forward),
                ("order", order),
                ("redirect", redirect),
                ("target_group_arn", target_group_arn)):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter
    def type(self) -> pulumi.Input[str]:
        """
        The type of routing action. Valid values are `forward`, `redirect`, `fixed-response`, `authenticate-cognito` and `authenticate-oidc`.
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: pulumi.Input[str]):
        pulumi.set(self, "type", value)

    @property
    @pulumi.getter(name="authenticateCognito")
    def authenticate_cognito(self) -> Optional[pulumi.Input['ListenerRuleActionAuthenticateCognitoArgs']]:
        """
        Information for creating an authenticate action using Cognito. Required if `type` is `authenticate-cognito`.
        """
        return pulumi.get(self, "authenticate_cognito")

    @authenticate_cognito.setter
    def authenticate_cognito(self, value: Optional[pulumi.Input['ListenerRuleActionAuthenticateCognitoArgs']]):
        pulumi.set(self, "authenticate_cognito", value)

    @property
    @pulumi.getter(name="authenticateOidc")
    def authenticate_oidc(self) -> Optional[pulumi.Input['ListenerRuleActionAuthenticateOidcArgs']]:
        """
        Information for creating an authenticate action using OIDC. Required if `type` is `authenticate-oidc`.
        """
        return pulumi.get(self, "authenticate_oidc")

    @authenticate_oidc.setter
    def authenticate_oidc(self, value: Optional[pulumi.Input['ListenerRuleActionAuthenticateOidcArgs']]):
        pulumi.set(self, "authenticate_oidc", value)

    @property
    @pulumi.getter(name="fixedResponse")
    def fixed_response(self) -> Optional[pulumi.Input['ListenerRuleActionFixedResponseArgs']]:
        """
        Information for creating an action that returns a custom HTTP response. Required if `type` is `fixed-response`.
        """
        return pulumi.get(self, "fixed_response")

    @fixed_response.setter
    def fixed_response(self, value: Optional[pulumi.Input['ListenerRuleActionFixedResponseArgs']]):
        pulumi.set(self, "fixed_response", value)

    @property
    @pulumi.getter
    def forward(self) -> Optional[pulumi.Input['ListenerRuleActionForwardArgs']]:
        """
        Information for creating an action that distributes requests among one or more target groups. Specify only if `type` is `forward`. If you specify both `forward` block and `target_group_arn` attribute, you can specify only one target group using `forward` and it must be the same target group specified in `target_group_arn`.
        """
        return pulumi.get(self, "forward")

    @forward.setter
    def forward(self, value: Optional[pulumi.Input['ListenerRuleActionForwardArgs']]):
        pulumi.set(self, "forward", value)

    @property
    @pulumi.getter
    def order(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "order")

    @order.setter
    def order(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "order", value)

    @property
    @pulumi.getter
    def redirect(self) -> Optional[pulumi.Input['ListenerRuleActionRedirectArgs']]:
        """
        Information for creating a redirect action. Required if `type` is `redirect`.
        """
        return pulumi.get(self, "redirect")

    @redirect.setter
    def redirect(self, value: Optional[pulumi.Input['ListenerRuleActionRedirectArgs']]):
        pulumi.set(self, "redirect", value)

    @property
    @pulumi.getter(name="targetGroupArn")
    def target_group_arn(self) -> Optional[pulumi.Input[str]]:
        """
        The ARN of the Target Group to which to route traffic. Specify only if `type` is `forward` and you want to route to a single target group. To route to one or more target groups, use a `forward` block instead.
        """
        return pulumi.get(self, "target_group_arn")

    @target_group_arn.setter
    def target_group_arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "target_group_arn", value)
@pulumi.input_type
class ListenerRuleActionAuthenticateCognitoArgs:
    def __init__(__self__, *,
                 user_pool_arn: pulumi.Input[str],
                 user_pool_client_id: pulumi.Input[str],
                 user_pool_domain: pulumi.Input[str],
                 authentication_request_extra_params: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 on_unauthenticated_request: Optional[pulumi.Input[str]] = None,
                 scope: Optional[pulumi.Input[str]] = None,
                 session_cookie_name: Optional[pulumi.Input[str]] = None,
                 session_timeout: Optional[pulumi.Input[int]] = None):
        """
        Input arguments for a Cognito-based authenticate action.

        :param pulumi.Input[str] user_pool_arn: The ARN of the Cognito user pool.
        :param pulumi.Input[str] user_pool_client_id: The ID of the Cognito user pool client.
        :param pulumi.Input[str] user_pool_domain: The domain prefix or fully-qualified domain name of the Cognito user pool.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] authentication_request_extra_params: The query parameters to include in the redirect request to the authorization endpoint. Max: 10.
        :param pulumi.Input[str] on_unauthenticated_request: The behavior if the user is not authenticated. Valid values: `deny`, `allow` and `authenticate`
        :param pulumi.Input[str] scope: The set of user claims to be requested from the IdP.
        :param pulumi.Input[str] session_cookie_name: The name of the cookie used to maintain session information.
        :param pulumi.Input[int] session_timeout: The maximum duration of the authentication session, in seconds.
        """
        # Required arguments are always stored.
        pulumi.set(__self__, "user_pool_arn", user_pool_arn)
        pulumi.set(__self__, "user_pool_client_id", user_pool_client_id)
        pulumi.set(__self__, "user_pool_domain", user_pool_domain)
        # Optional arguments are stored only when supplied.
        for attr_name, attr_value in (
                ("authentication_request_extra_params", authentication_request_extra_params),
                ("on_unauthenticated_request", on_unauthenticated_request),
                ("scope", scope),
                ("session_cookie_name", session_cookie_name),
                ("session_timeout", session_timeout)):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="userPoolArn")
    def user_pool_arn(self) -> pulumi.Input[str]:
        """
        The ARN of the Cognito user pool.
        """
        return pulumi.get(self, "user_pool_arn")

    @user_pool_arn.setter
    def user_pool_arn(self, value: pulumi.Input[str]):
        pulumi.set(self, "user_pool_arn", value)

    @property
    @pulumi.getter(name="userPoolClientId")
    def user_pool_client_id(self) -> pulumi.Input[str]:
        """
        The ID of the Cognito user pool client.
        """
        return pulumi.get(self, "user_pool_client_id")

    @user_pool_client_id.setter
    def user_pool_client_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "user_pool_client_id", value)

    @property
    @pulumi.getter(name="userPoolDomain")
    def user_pool_domain(self) -> pulumi.Input[str]:
        """
        The domain prefix or fully-qualified domain name of the Cognito user pool.
        """
        return pulumi.get(self, "user_pool_domain")

    @user_pool_domain.setter
    def user_pool_domain(self, value: pulumi.Input[str]):
        pulumi.set(self, "user_pool_domain", value)

    @property
    @pulumi.getter(name="authenticationRequestExtraParams")
    def authentication_request_extra_params(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        The query parameters to include in the redirect request to the authorization endpoint. Max: 10.
        """
        return pulumi.get(self, "authentication_request_extra_params")

    @authentication_request_extra_params.setter
    def authentication_request_extra_params(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "authentication_request_extra_params", value)

    @property
    @pulumi.getter(name="onUnauthenticatedRequest")
    def on_unauthenticated_request(self) -> Optional[pulumi.Input[str]]:
        """
        The behavior if the user is not authenticated. Valid values: `deny`, `allow` and `authenticate`
        """
        return pulumi.get(self, "on_unauthenticated_request")

    @on_unauthenticated_request.setter
    def on_unauthenticated_request(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "on_unauthenticated_request", value)

    @property
    @pulumi.getter
    def scope(self) -> Optional[pulumi.Input[str]]:
        """
        The set of user claims to be requested from the IdP.
        """
        return pulumi.get(self, "scope")

    @scope.setter
    def scope(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "scope", value)

    @property
    @pulumi.getter(name="sessionCookieName")
    def session_cookie_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the cookie used to maintain session information.
        """
        return pulumi.get(self, "session_cookie_name")

    @session_cookie_name.setter
    def session_cookie_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "session_cookie_name", value)

    @property
    @pulumi.getter(name="sessionTimeout")
    def session_timeout(self) -> Optional[pulumi.Input[int]]:
        """
        The maximum duration of the authentication session, in seconds.
        """
        return pulumi.get(self, "session_timeout")

    @session_timeout.setter
    def session_timeout(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "session_timeout", value)
@pulumi.input_type
class ListenerRuleActionAuthenticateOidcArgs:
    def __init__(__self__, *,
                 authorization_endpoint: pulumi.Input[str],
                 client_id: pulumi.Input[str],
                 client_secret: pulumi.Input[str],
                 issuer: pulumi.Input[str],
                 token_endpoint: pulumi.Input[str],
                 user_info_endpoint: pulumi.Input[str],
                 authentication_request_extra_params: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 on_unauthenticated_request: Optional[pulumi.Input[str]] = None,
                 scope: Optional[pulumi.Input[str]] = None,
                 session_cookie_name: Optional[pulumi.Input[str]] = None,
                 session_timeout: Optional[pulumi.Input[int]] = None):
        """
        Input arguments for an OIDC-based authenticate action.

        :param pulumi.Input[str] authorization_endpoint: The authorization endpoint of the IdP.
        :param pulumi.Input[str] client_id: The OAuth 2.0 client identifier.
        :param pulumi.Input[str] client_secret: The OAuth 2.0 client secret.
        :param pulumi.Input[str] issuer: The OIDC issuer identifier of the IdP.
        :param pulumi.Input[str] token_endpoint: The token endpoint of the IdP.
        :param pulumi.Input[str] user_info_endpoint: The user info endpoint of the IdP.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] authentication_request_extra_params: The query parameters to include in the redirect request to the authorization endpoint. Max: 10.
        :param pulumi.Input[str] on_unauthenticated_request: The behavior if the user is not authenticated. Valid values: `deny`, `allow` and `authenticate`
        :param pulumi.Input[str] scope: The set of user claims to be requested from the IdP.
        :param pulumi.Input[str] session_cookie_name: The name of the cookie used to maintain session information.
        :param pulumi.Input[int] session_timeout: The maximum duration of the authentication session, in seconds.
        """
        # Required arguments are always stored.
        pulumi.set(__self__, "authorization_endpoint", authorization_endpoint)
        pulumi.set(__self__, "client_id", client_id)
        pulumi.set(__self__, "client_secret", client_secret)
        pulumi.set(__self__, "issuer", issuer)
        pulumi.set(__self__, "token_endpoint", token_endpoint)
        pulumi.set(__self__, "user_info_endpoint", user_info_endpoint)
        # Optional arguments are stored only when supplied.
        for attr_name, attr_value in (
                ("authentication_request_extra_params", authentication_request_extra_params),
                ("on_unauthenticated_request", on_unauthenticated_request),
                ("scope", scope),
                ("session_cookie_name", session_cookie_name),
                ("session_timeout", session_timeout)):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="authorizationEndpoint")
    def authorization_endpoint(self) -> pulumi.Input[str]:
        """
        The authorization endpoint of the IdP.
        """
        return pulumi.get(self, "authorization_endpoint")

    @authorization_endpoint.setter
    def authorization_endpoint(self, value: pulumi.Input[str]):
        pulumi.set(self, "authorization_endpoint", value)

    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> pulumi.Input[str]:
        """
        The OAuth 2.0 client identifier.
        """
        return pulumi.get(self, "client_id")

    @client_id.setter
    def client_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "client_id", value)

    @property
    @pulumi.getter(name="clientSecret")
    def client_secret(self) -> pulumi.Input[str]:
        """
        The OAuth 2.0 client secret.
        """
        return pulumi.get(self, "client_secret")

    @client_secret.setter
    def client_secret(self, value: pulumi.Input[str]):
        pulumi.set(self, "client_secret", value)

    @property
    @pulumi.getter
    def issuer(self) -> pulumi.Input[str]:
        """
        The OIDC issuer identifier of the IdP.
        """
        return pulumi.get(self, "issuer")

    @issuer.setter
    def issuer(self, value: pulumi.Input[str]):
        pulumi.set(self, "issuer", value)

    @property
    @pulumi.getter(name="tokenEndpoint")
    def token_endpoint(self) -> pulumi.Input[str]:
        """
        The token endpoint of the IdP.
        """
        return pulumi.get(self, "token_endpoint")

    @token_endpoint.setter
    def token_endpoint(self, value: pulumi.Input[str]):
        pulumi.set(self, "token_endpoint", value)

    @property
    @pulumi.getter(name="userInfoEndpoint")
    def user_info_endpoint(self) -> pulumi.Input[str]:
        """
        The user info endpoint of the IdP.
        """
        return pulumi.get(self, "user_info_endpoint")

    @user_info_endpoint.setter
    def user_info_endpoint(self, value: pulumi.Input[str]):
        pulumi.set(self, "user_info_endpoint", value)

    @property
    @pulumi.getter(name="authenticationRequestExtraParams")
    def authentication_request_extra_params(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        The query parameters to include in the redirect request to the authorization endpoint. Max: 10.
        """
        return pulumi.get(self, "authentication_request_extra_params")

    @authentication_request_extra_params.setter
    def authentication_request_extra_params(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "authentication_request_extra_params", value)

    @property
    @pulumi.getter(name="onUnauthenticatedRequest")
    def on_unauthenticated_request(self) -> Optional[pulumi.Input[str]]:
        """
        The behavior if the user is not authenticated. Valid values: `deny`, `allow` and `authenticate`
        """
        return pulumi.get(self, "on_unauthenticated_request")

    @on_unauthenticated_request.setter
    def on_unauthenticated_request(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "on_unauthenticated_request", value)

    @property
    @pulumi.getter
    def scope(self) -> Optional[pulumi.Input[str]]:
        """
        The set of user claims to be requested from the IdP.
        """
        return pulumi.get(self, "scope")

    @scope.setter
    def scope(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "scope", value)

    @property
    @pulumi.getter(name="sessionCookieName")
    def session_cookie_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the cookie used to maintain session information.
        """
        return pulumi.get(self, "session_cookie_name")

    @session_cookie_name.setter
    def session_cookie_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "session_cookie_name", value)

    @property
    @pulumi.getter(name="sessionTimeout")
    def session_timeout(self) -> Optional[pulumi.Input[int]]:
        """
        The maximum duration of the authentication session, in seconds.
        """
        return pulumi.get(self, "session_timeout")

    @session_timeout.setter
    def session_timeout(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "session_timeout", value)
@pulumi.input_type
class ListenerRuleActionFixedResponseArgs:
    def __init__(__self__, *,
                 content_type: pulumi.Input[str],
                 message_body: Optional[pulumi.Input[str]] = None,
                 status_code: Optional[pulumi.Input[str]] = None):
        """
        Input arguments for a fixed-response action.

        :param pulumi.Input[str] content_type: The content type. Valid values are `text/plain`, `text/css`, `text/html`, `application/javascript` and `application/json`.
        :param pulumi.Input[str] message_body: The message body.
        :param pulumi.Input[str] status_code: The HTTP response code. Valid values are `2XX`, `4XX`, or `5XX`.
        """
        pulumi.set(__self__, "content_type", content_type)
        # Optional arguments are stored only when supplied.
        for attr_name, attr_value in (
                ("message_body", message_body),
                ("status_code", status_code)):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="contentType")
    def content_type(self) -> pulumi.Input[str]:
        """
        The content type. Valid values are `text/plain`, `text/css`, `text/html`, `application/javascript` and `application/json`.
        """
        return pulumi.get(self, "content_type")

    @content_type.setter
    def content_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "content_type", value)

    @property
    @pulumi.getter(name="messageBody")
    def message_body(self) -> Optional[pulumi.Input[str]]:
        """
        The message body.
        """
        return pulumi.get(self, "message_body")

    @message_body.setter
    def message_body(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "message_body", value)

    @property
    @pulumi.getter(name="statusCode")
    def status_code(self) -> Optional[pulumi.Input[str]]:
        """
        The HTTP response code. Valid values are `2XX`, `4XX`, or `5XX`.
        """
        return pulumi.get(self, "status_code")

    @status_code.setter
    def status_code(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "status_code", value)
@pulumi.input_type
class ListenerRuleActionForwardArgs:
    def __init__(__self__, *,
                 target_groups: pulumi.Input[Sequence[pulumi.Input['ListenerRuleActionForwardTargetGroupArgs']]],
                 stickiness: Optional[pulumi.Input['ListenerRuleActionForwardStickinessArgs']] = None):
        """
        Input arguments for a forward action.

        :param pulumi.Input[Sequence[pulumi.Input['ListenerRuleActionForwardTargetGroupArgs']]] target_groups: One or more target groups block.
        :param pulumi.Input['ListenerRuleActionForwardStickinessArgs'] stickiness: The target group stickiness for the rule.
        """
        pulumi.set(__self__, "target_groups", target_groups)
        # Stickiness is optional; store it only when supplied.
        if stickiness is not None:
            pulumi.set(__self__, "stickiness", stickiness)

    @property
    @pulumi.getter(name="targetGroups")
    def target_groups(self) -> pulumi.Input[Sequence[pulumi.Input['ListenerRuleActionForwardTargetGroupArgs']]]:
        """
        One or more target groups block.
        """
        return pulumi.get(self, "target_groups")

    @target_groups.setter
    def target_groups(self, value: pulumi.Input[Sequence[pulumi.Input['ListenerRuleActionForwardTargetGroupArgs']]]):
        pulumi.set(self, "target_groups", value)

    @property
    @pulumi.getter
    def stickiness(self) -> Optional[pulumi.Input['ListenerRuleActionForwardStickinessArgs']]:
        """
        The target group stickiness for the rule.
        """
        return pulumi.get(self, "stickiness")

    @stickiness.setter
    def stickiness(self, value: Optional[pulumi.Input['ListenerRuleActionForwardStickinessArgs']]):
        pulumi.set(self, "stickiness", value)
@pulumi.input_type
class ListenerRuleActionForwardStickinessArgs:
    def __init__(__self__, *,
                 duration: pulumi.Input[int],
                 enabled: Optional[pulumi.Input[bool]] = None):
        """
        Input arguments for target group stickiness on a forward action.

        :param pulumi.Input[int] duration: The time period, in seconds, during which requests from a client should be routed to the same target group. The range is 1-604800 seconds (7 days).
        :param pulumi.Input[bool] enabled: Indicates whether target group stickiness is enabled.
        """
        pulumi.set(__self__, "duration", duration)
        # The enabled flag is optional; store it only when supplied.
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)

    @property
    @pulumi.getter
    def duration(self) -> pulumi.Input[int]:
        """
        The time period, in seconds, during which requests from a client should be routed to the same target group. The range is 1-604800 seconds (7 days).
        """
        return pulumi.get(self, "duration")

    @duration.setter
    def duration(self, value: pulumi.Input[int]):
        pulumi.set(self, "duration", value)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicates whether target group stickiness is enabled.
        """
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
@pulumi.input_type
class ListenerRuleActionForwardTargetGroupArgs:
    def __init__(__self__, *,
                 arn: pulumi.Input[str],
                 weight: Optional[pulumi.Input[int]] = None):
        """
        Input arguments for one weighted target group of a forward action.

        :param pulumi.Input[str] arn: The Amazon Resource Name (ARN) of the target group.
        :param pulumi.Input[int] weight: The weight. The range is 0 to 999.
        """
        pulumi.set(__self__, "arn", arn)
        # The weight is optional; store it only when supplied.
        if weight is not None:
            pulumi.set(__self__, "weight", weight)

    @property
    @pulumi.getter
    def arn(self) -> pulumi.Input[str]:
        """
        The Amazon Resource Name (ARN) of the target group.
        """
        return pulumi.get(self, "arn")

    @arn.setter
    def arn(self, value: pulumi.Input[str]):
        pulumi.set(self, "arn", value)

    @property
    @pulumi.getter
    def weight(self) -> Optional[pulumi.Input[int]]:
        """
        The weight. The range is 0 to 999.
        """
        return pulumi.get(self, "weight")

    @weight.setter
    def weight(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "weight", value)
@pulumi.input_type
class ListenerRuleActionRedirectArgs:
    def __init__(__self__, *,
                 status_code: pulumi.Input[str],
                 host: Optional[pulumi.Input[str]] = None,
                 path: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[str]] = None,
                 protocol: Optional[pulumi.Input[str]] = None,
                 query: Optional[pulumi.Input[str]] = None):
        """
        Input arguments for a redirect action.

        :param pulumi.Input[str] status_code: The HTTP redirect code. The redirect is either permanent (`HTTP_301`) or temporary (`HTTP_302`).
        :param pulumi.Input[str] host: The hostname. This component is not percent-encoded. The hostname can contain `#{host}`. Defaults to `#{host}`.
        :param pulumi.Input[str] path: The absolute path, starting with the leading "/". This component is not percent-encoded. The path can contain #{host}, #{path}, and #{port}. Defaults to `/#{path}`.
        :param pulumi.Input[str] port: The port. Specify a value from `1` to `65535` or `#{port}`. Defaults to `#{port}`.
        :param pulumi.Input[str] protocol: The protocol. Valid values are `HTTP`, `HTTPS`, or `#{protocol}`. Defaults to `#{protocol}`.
        :param pulumi.Input[str] query: The query parameters, URL-encoded when necessary, but not percent-encoded. Do not include the leading "?". Defaults to `#{query}`.
        """
        pulumi.set(__self__, "status_code", status_code)
        # Optional URL components default to `#{...}` placeholders on the AWS
        # side; store them only when the caller overrides a component.
        for attr_name, attr_value in (
                ("host", host),
                ("path", path),
                ("port", port),
                ("protocol", protocol),
                ("query", query)):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="statusCode")
    def status_code(self) -> pulumi.Input[str]:
        """
        The HTTP redirect code. The redirect is either permanent (`HTTP_301`) or temporary (`HTTP_302`).
        """
        return pulumi.get(self, "status_code")

    @status_code.setter
    def status_code(self, value: pulumi.Input[str]):
        pulumi.set(self, "status_code", value)

    @property
    @pulumi.getter
    def host(self) -> Optional[pulumi.Input[str]]:
        """
        The hostname. This component is not percent-encoded. The hostname can contain `#{host}`. Defaults to `#{host}`.
        """
        return pulumi.get(self, "host")

    @host.setter
    def host(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "host", value)

    @property
    @pulumi.getter
    def path(self) -> Optional[pulumi.Input[str]]:
        """
        The absolute path, starting with the leading "/". This component is not percent-encoded. The path can contain #{host}, #{path}, and #{port}. Defaults to `/#{path}`.
        """
        return pulumi.get(self, "path")

    @path.setter
    def path(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "path", value)

    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[str]]:
        """
        The port. Specify a value from `1` to `65535` or `#{port}`. Defaults to `#{port}`.
        """
        return pulumi.get(self, "port")

    @port.setter
    def port(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "port", value)

    @property
    @pulumi.getter
    def protocol(self) -> Optional[pulumi.Input[str]]:
        """
        The protocol. Valid values are `HTTP`, `HTTPS`, or `#{protocol}`. Defaults to `#{protocol}`.
        """
        return pulumi.get(self, "protocol")

    @protocol.setter
    def protocol(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protocol", value)

    @property
    @pulumi.getter
    def query(self) -> Optional[pulumi.Input[str]]:
        """
        The query parameters, URL-encoded when necessary, but not percent-encoded. Do not include the leading "?". Defaults to `#{query}`.
        """
        return pulumi.get(self, "query")

    @query.setter
    def query(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "query", value)
@pulumi.input_type
class ListenerRuleConditionArgs:
def __init__(__self__, *,
host_header: Optional[pulumi.Input['ListenerRuleConditionHostHeaderArgs']] = None,
http_header: Optional[pulumi.Input['ListenerRuleConditionHttpHeaderArgs']] = None,
http_request_method: Optional[pulumi.Input['ListenerRuleConditionHttpRequestMethodArgs']] = None,
path_pattern: Optional[pulumi.Input['ListenerRuleConditionPathPatternArgs']] = None,
query_strings: Optional[pulumi.Input[Sequence[pulumi.Input['ListenerRuleConditionQueryStringArgs']]]] = None,
source_ip: Optional[pulumi.Input['ListenerRuleConditionSourceIpArgs']] = None):
"""
:param pulumi.Input['ListenerRuleConditionHostHeaderArgs'] host_header: Contains a single `values` item which is a list of host header patterns to match. The maximum size of each pattern is 128 characters. Comparison is case insensitive. Wildcard characters supported: * (matches 0 or more characters) and ? (matches exactly 1 character). Only one pattern needs to match for the condition to be satisfied.
:param pulumi.Input['ListenerRuleConditionHttpHeaderArgs'] http_header: HTTP headers to match. HTTP Header block fields documented below.
:param pulumi.Input['ListenerRuleConditionHttpRequestMethodArgs'] http_request_method: Contains a single `values` item which is a list of HTTP request methods or verbs to match. Maximum size is 40 characters. Only allowed characters are A-Z, hyphen (-) and underscore (\_). Comparison is case sensitive. Wildcards are not supported. Only one needs to match for the condition to be satisfied. AWS recommends that GET and HEAD requests are routed in the same way because the response to a HEAD request may be cached.
:param pulumi.Input['ListenerRuleConditionPathPatternArgs'] path_pattern: Contains a single `values` item which is a list of path patterns to match against the request URL. Maximum size of each pattern is 128 characters. Comparison is case sensitive. Wildcard characters supported: * (matches 0 or more characters) and ? (matches exactly 1 character). Only one pattern needs to match for the condition to be satisfied. Path pattern is compared only to the path of the URL, not to its query string. To compare against the query string, use a `query_string` condition.
:param pulumi.Input[Sequence[pulumi.Input['ListenerRuleConditionQueryStringArgs']]] query_strings: Query strings to match. Query String block fields documented below.
:param pulumi.Input['ListenerRuleConditionSourceIpArgs'] source_ip: Contains a single `values` item which is a list of source IP CIDR notations to match. You can use both IPv4 and IPv6 addresses. Wildcards are not supported. Condition is satisfied if the source IP address of the request matches one of the CIDR blocks. Condition is not satisfied by the addresses in the `X-Forwarded-For` header, use `http_header` condition instead.
"""
if host_header is not None:
pulumi.set(__self__, "host_header", host_header)
if http_header is not None:
pulumi.set(__self__, "http_header", http_header)
if http_request_method is not None:
pulumi.set(__self__, "http_request_method", http_request_method)
if path_pattern is not None:
pulumi.set(__self__, "path_pattern", path_pattern)
if query_strings is not None:
pulumi.set(__self__, "query_strings", query_strings)
if source_ip is not None:
pulumi.set(__self__, "source_ip", source_ip)
@property
@pulumi.getter(name="hostHeader")
def host_header(self) -> Optional[pulumi.Input['ListenerRuleConditionHostHeaderArgs']]:
"""
Contains a single `values` item which is a list of host header patterns to match. The maximum size of each pattern is 128 characters. Comparison is case insensitive. Wildcard characters supported: * (matches 0 or more characters) and ? (matches exactly 1 character). Only one pattern needs to match for the condition to be satisfied.
"""
return pulumi.get(self, "host_header")
@host_header.setter
def host_header(self, value: Optional[pulumi.Input['ListenerRuleConditionHostHeaderArgs']]):
pulumi.set(self, "host_header", value)
@property
@pulumi.getter(name="httpHeader")
def http_header(self) -> Optional[pulumi.Input['ListenerRuleConditionHttpHeaderArgs']]:
"""
HTTP headers to match. HTTP Header block fields documented below.
"""
return pulumi.get(self, "http_header")
@http_header.setter
def http_header(self, value: Optional[pulumi.Input['ListenerRuleConditionHttpHeaderArgs']]):
pulumi.set(self, "http_header", value)
@property
@pulumi.getter(name="httpRequestMethod")
def http_request_method(self) -> Optional[pulumi.Input['ListenerRuleConditionHttpRequestMethodArgs']]:
    """
    Contains a single `values` item which is a list of HTTP request methods or verbs to match. Maximum size is 40 characters. Only allowed characters are A-Z, hyphen (-) and underscore (\_). Comparison is case sensitive. Wildcards are not supported. Only one needs to match for the condition to be satisfied. AWS recommends that GET and HEAD requests are routed in the same way because the response to a HEAD request may be cached.
    """
    return pulumi.get(self, "http_request_method")

@http_request_method.setter
def http_request_method(self, value: Optional[pulumi.Input['ListenerRuleConditionHttpRequestMethodArgs']]):
    # Stores the value under the snake_case key; the wire name is "httpRequestMethod".
    pulumi.set(self, "http_request_method", value)
@property
@pulumi.getter(name="pathPattern")
def path_pattern(self) -> Optional[pulumi.Input['ListenerRuleConditionPathPatternArgs']]:
    """
    Contains a single `values` item which is a list of path patterns to match against the request URL. Maximum size of each pattern is 128 characters. Comparison is case sensitive. Wildcard characters supported: * (matches 0 or more characters) and ? (matches exactly 1 character). Only one pattern needs to match for the condition to be satisfied. Path pattern is compared only to the path of the URL, not to its query string. To compare against the query string, use a `query_string` condition.
    """
    return pulumi.get(self, "path_pattern")

@path_pattern.setter
def path_pattern(self, value: Optional[pulumi.Input['ListenerRuleConditionPathPatternArgs']]):
    # Stores the value under the snake_case key; the wire name is "pathPattern".
    pulumi.set(self, "path_pattern", value)
@property
@pulumi.getter(name="queryStrings")
def query_strings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ListenerRuleConditionQueryStringArgs']]]]:
    """
    Query strings to match. Query String block fields documented below.
    """
    return pulumi.get(self, "query_strings")

@query_strings.setter
def query_strings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ListenerRuleConditionQueryStringArgs']]]]):
    # Stores the value under the snake_case key; the wire name is "queryStrings".
    pulumi.set(self, "query_strings", value)
@property
@pulumi.getter(name="sourceIp")
def source_ip(self) -> Optional[pulumi.Input['ListenerRuleConditionSourceIpArgs']]:
    """
    Contains a single `values` item which is a list of source IP CIDR notations to match. You can use both IPv4 and IPv6 addresses. Wildcards are not supported. Condition is satisfied if the source IP address of the request matches one of the CIDR blocks. Condition is not satisfied by the addresses in the `X-Forwarded-For` header, use `http_header` condition instead.
    """
    return pulumi.get(self, "source_ip")

@source_ip.setter
def source_ip(self, value: Optional[pulumi.Input['ListenerRuleConditionSourceIpArgs']]):
    # Stores the value under the snake_case key; the wire name is "sourceIp".
    pulumi.set(self, "source_ip", value)
@pulumi.input_type
class ListenerRuleConditionHostHeaderArgs:
    # NOTE(review): the original `values` docstrings here were copy-pasted from the
    # HTTP-header condition ("If the same header appears multiple times ...");
    # corrected to the host-header wording used by the parent condition block.
    def __init__(__self__, *,
                 values: pulumi.Input[Sequence[pulumi.Input[str]]]):
        """
        Host-header condition block for a listener rule.

        :param pulumi.Input[Sequence[pulumi.Input[str]]] values: List of host header patterns to match. The maximum size of each pattern is 128 characters. Comparison is case insensitive. Wildcard characters supported: * (matches 0 or more characters) and ? (matches exactly 1 character). Only one pattern needs to match for the condition to be satisfied.
        """
        pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def values(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        List of host header patterns to match. The maximum size of each pattern is 128 characters. Comparison is case insensitive. Wildcard characters supported: * (matches 0 or more characters) and ? (matches exactly 1 character). Only one pattern needs to match for the condition to be satisfied.
        """
        return pulumi.get(self, "values")

    @values.setter
    def values(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "values", value)
@pulumi.input_type
class ListenerRuleConditionHttpHeaderArgs:
    def __init__(__self__, *,
                 http_header_name: pulumi.Input[str],
                 values: pulumi.Input[Sequence[pulumi.Input[str]]]):
        """
        HTTP-header condition block for a listener rule.

        :param pulumi.Input[str] http_header_name: Name of HTTP header to search. The maximum size is 40 characters. Comparison is case insensitive. Only RFC7240 characters are supported. Wildcards are not supported. You cannot use HTTP header condition to specify the host header, use a `host-header` condition instead.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] values: List of header value patterns to match. Maximum size of each pattern is 128 characters. Comparison is case insensitive. Wildcard characters supported: * (matches 0 or more characters) and ? (matches exactly 1 character). If the same header appears multiple times in the request they will be searched in order until a match is found. Only one pattern needs to match for the condition to be satisfied. To require that all of the strings are a match, create one condition block per string.
        """
        pulumi.set(__self__, "http_header_name", http_header_name)
        pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter(name="httpHeaderName")
    def http_header_name(self) -> pulumi.Input[str]:
        """
        Name of HTTP header to search. The maximum size is 40 characters. Comparison is case insensitive. Only RFC7240 characters are supported. Wildcards are not supported. You cannot use HTTP header condition to specify the host header, use a `host-header` condition instead.
        """
        return pulumi.get(self, "http_header_name")

    @http_header_name.setter
    def http_header_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "http_header_name", value)

    @property
    @pulumi.getter
    def values(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        List of header value patterns to match. Maximum size of each pattern is 128 characters. Comparison is case insensitive. Wildcard characters supported: * (matches 0 or more characters) and ? (matches exactly 1 character). If the same header appears multiple times in the request they will be searched in order until a match is found. Only one pattern needs to match for the condition to be satisfied. To require that all of the strings are a match, create one condition block per string.
        """
        return pulumi.get(self, "values")

    @values.setter
    def values(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "values", value)
@pulumi.input_type
class ListenerRuleConditionHttpRequestMethodArgs:
    # NOTE(review): the original `values` docstrings here were copy-pasted from the
    # HTTP-header condition; corrected to the request-method wording used by the
    # parent condition block's `http_request_method` documentation.
    def __init__(__self__, *,
                 values: pulumi.Input[Sequence[pulumi.Input[str]]]):
        """
        HTTP-request-method condition block for a listener rule.

        :param pulumi.Input[Sequence[pulumi.Input[str]]] values: List of HTTP request methods or verbs to match. Maximum size is 40 characters. Only allowed characters are A-Z, hyphen (-) and underscore (\_). Comparison is case sensitive. Wildcards are not supported. Only one needs to match for the condition to be satisfied.
        """
        pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def values(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        List of HTTP request methods or verbs to match. Maximum size is 40 characters. Only allowed characters are A-Z, hyphen (-) and underscore (\_). Comparison is case sensitive. Wildcards are not supported. Only one needs to match for the condition to be satisfied.
        """
        return pulumi.get(self, "values")

    @values.setter
    def values(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "values", value)
@pulumi.input_type
class ListenerRuleConditionPathPatternArgs:
    # NOTE(review): the original `values` docstrings here were copy-pasted from the
    # HTTP-header condition; corrected to the path-pattern wording used by the
    # parent condition block's `path_pattern` documentation.
    def __init__(__self__, *,
                 values: pulumi.Input[Sequence[pulumi.Input[str]]]):
        """
        Path-pattern condition block for a listener rule.

        :param pulumi.Input[Sequence[pulumi.Input[str]]] values: List of path patterns to match against the request URL. Maximum size of each pattern is 128 characters. Comparison is case sensitive. Wildcard characters supported: * (matches 0 or more characters) and ? (matches exactly 1 character). Only one pattern needs to match for the condition to be satisfied. Path pattern is compared only to the path of the URL, not to its query string.
        """
        pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def values(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        List of path patterns to match against the request URL. Maximum size of each pattern is 128 characters. Comparison is case sensitive. Wildcard characters supported: * (matches 0 or more characters) and ? (matches exactly 1 character). Only one pattern needs to match for the condition to be satisfied. Path pattern is compared only to the path of the URL, not to its query string.
        """
        return pulumi.get(self, "values")

    @values.setter
    def values(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "values", value)
@pulumi.input_type
class ListenerRuleConditionQueryStringArgs:
    def __init__(__self__, *,
                 value: pulumi.Input[str],
                 key: Optional[pulumi.Input[str]] = None):
        """
        Query-string condition block for a listener rule.

        :param pulumi.Input[str] value: Query string value pattern to match.
        :param pulumi.Input[str] key: Query string key pattern to match.
        """
        pulumi.set(__self__, "value", value)
        # `key` is optional: when omitted the condition matches on value alone.
        if key is not None:
            pulumi.set(__self__, "key", key)

    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        """
        Query string value pattern to match.
        """
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)

    @property
    @pulumi.getter
    def key(self) -> Optional[pulumi.Input[str]]:
        """
        Query string key pattern to match.
        """
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key", value)
@pulumi.input_type
class ListenerRuleConditionSourceIpArgs:
    # NOTE(review): the original `values` docstrings here were copy-pasted from the
    # HTTP-header condition; corrected to the source-IP wording used by the
    # parent condition block's `source_ip` documentation.
    def __init__(__self__, *,
                 values: pulumi.Input[Sequence[pulumi.Input[str]]]):
        """
        Source-IP condition block for a listener rule.

        :param pulumi.Input[Sequence[pulumi.Input[str]]] values: List of source IP CIDR notations to match. You can use both IPv4 and IPv6 addresses. Wildcards are not supported. Condition is satisfied if the source IP address of the request matches one of the CIDR blocks.
        """
        pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def values(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        List of source IP CIDR notations to match. You can use both IPv4 and IPv6 addresses. Wildcards are not supported. Condition is satisfied if the source IP address of the request matches one of the CIDR blocks.
        """
        return pulumi.get(self, "values")

    @values.setter
    def values(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "values", value)
@pulumi.input_type
class LoadBalancerAccessLogsArgs:
    def __init__(__self__, *,
                 bucket: pulumi.Input[str],
                 enabled: Optional[pulumi.Input[bool]] = None,
                 prefix: Optional[pulumi.Input[str]] = None):
        """
        Access-logs configuration block for a load balancer.

        :param pulumi.Input[str] bucket: The S3 bucket name to store the logs in.
        :param pulumi.Input[bool] enabled: Boolean to enable / disable `access_logs`. Defaults to `false`, even when `bucket` is specified.
        :param pulumi.Input[str] prefix: The S3 bucket prefix. Logs are stored in the root if not configured.
        """
        pulumi.set(__self__, "bucket", bucket)
        # Optional fields are only recorded when explicitly provided, so the
        # provider can distinguish "unset" from an explicit value.
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
        if prefix is not None:
            pulumi.set(__self__, "prefix", prefix)

    @property
    @pulumi.getter
    def bucket(self) -> pulumi.Input[str]:
        """
        The S3 bucket name to store the logs in.
        """
        return pulumi.get(self, "bucket")

    @bucket.setter
    def bucket(self, value: pulumi.Input[str]):
        pulumi.set(self, "bucket", value)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Boolean to enable / disable `access_logs`. Defaults to `false`, even when `bucket` is specified.
        """
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)

    @property
    @pulumi.getter
    def prefix(self) -> Optional[pulumi.Input[str]]:
        """
        The S3 bucket prefix. Logs are stored in the root if not configured.
        """
        return pulumi.get(self, "prefix")

    @prefix.setter
    def prefix(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "prefix", value)
@pulumi.input_type
class LoadBalancerSubnetMappingArgs:
    def __init__(__self__, *,
                 subnet_id: pulumi.Input[str],
                 allocation_id: Optional[pulumi.Input[str]] = None,
                 outpost_id: Optional[pulumi.Input[str]] = None,
                 private_ipv4_address: Optional[pulumi.Input[str]] = None):
        """
        Subnet-mapping block for a load balancer.

        :param pulumi.Input[str] subnet_id: The id of the subnet of which to attach to the load balancer. You can specify only one subnet per Availability Zone.
        :param pulumi.Input[str] allocation_id: The allocation ID of the Elastic IP address.
        :param pulumi.Input[str] outpost_id: Outpost identifier for the subnet mapping. Not documented upstream — presumably the ID of the AWS Outpost containing the subnet; TODO confirm against provider docs.
        :param pulumi.Input[str] private_ipv4_address: A private ipv4 address within the subnet to assign to the internal-facing load balancer.
        """
        pulumi.set(__self__, "subnet_id", subnet_id)
        # Optional fields are only recorded when explicitly provided.
        if allocation_id is not None:
            pulumi.set(__self__, "allocation_id", allocation_id)
        if outpost_id is not None:
            pulumi.set(__self__, "outpost_id", outpost_id)
        if private_ipv4_address is not None:
            pulumi.set(__self__, "private_ipv4_address", private_ipv4_address)

    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> pulumi.Input[str]:
        """
        The id of the subnet of which to attach to the load balancer. You can specify only one subnet per Availability Zone.
        """
        return pulumi.get(self, "subnet_id")

    @subnet_id.setter
    def subnet_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "subnet_id", value)

    @property
    @pulumi.getter(name="allocationId")
    def allocation_id(self) -> Optional[pulumi.Input[str]]:
        """
        The allocation ID of the Elastic IP address.
        """
        return pulumi.get(self, "allocation_id")

    @allocation_id.setter
    def allocation_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "allocation_id", value)

    @property
    @pulumi.getter(name="outpostId")
    def outpost_id(self) -> Optional[pulumi.Input[str]]:
        # NOTE(review): no upstream description for this field — see __init__ doc.
        return pulumi.get(self, "outpost_id")

    @outpost_id.setter
    def outpost_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "outpost_id", value)

    @property
    @pulumi.getter(name="privateIpv4Address")
    def private_ipv4_address(self) -> Optional[pulumi.Input[str]]:
        """
        A private ipv4 address within the subnet to assign to the internal-facing load balancer.
        """
        return pulumi.get(self, "private_ipv4_address")

    @private_ipv4_address.setter
    def private_ipv4_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "private_ipv4_address", value)
@pulumi.input_type
class TargetGroupHealthCheckArgs:
    # NOTE(review): the original `enabled` docstring here was swapped with the one
    # in TargetGroupStickinessArgs ("Boolean to enable / disable `stickiness`");
    # corrected to describe the health check itself.
    def __init__(__self__, *,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 healthy_threshold: Optional[pulumi.Input[int]] = None,
                 interval: Optional[pulumi.Input[int]] = None,
                 matcher: Optional[pulumi.Input[str]] = None,
                 path: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[str]] = None,
                 protocol: Optional[pulumi.Input[str]] = None,
                 timeout: Optional[pulumi.Input[int]] = None,
                 unhealthy_threshold: Optional[pulumi.Input[int]] = None):
        """
        Health-check configuration block for a target group.

        :param pulumi.Input[bool] enabled: Indicates whether health checks are enabled. Defaults to `true`.
        :param pulumi.Input[int] healthy_threshold: The number of consecutive health checks successes required before considering an unhealthy target healthy. Defaults to 3.
        :param pulumi.Input[int] interval: The approximate amount of time, in seconds, between health checks of an individual target. Minimum value 5 seconds, Maximum value 300 seconds. For `lambda` target groups, it needs to be greater as the `timeout` of the underlying `lambda`. Default 30 seconds.
        :param pulumi.Input[str] matcher: The HTTP codes to use when checking for a successful response from a target. You can specify multiple values (for example, "200,202") or a range of values (for example, "200-299"). Applies to Application Load Balancers only (HTTP/HTTPS), not Network Load Balancers (TCP).
        :param pulumi.Input[str] path: The destination for the health check request. Applies to Application Load Balancers only (HTTP/HTTPS), not Network Load Balancers (TCP).
        :param pulumi.Input[str] port: The port on which targets receive traffic, unless overridden when registering a specific target. Required when `target_type` is `instance` or `ip`. Does not apply when `target_type` is `lambda`.
        :param pulumi.Input[str] protocol: The protocol to use for routing traffic to the targets. Should be one of "TCP", "TLS", "UDP", "TCP_UDP", "HTTP" or "HTTPS". Required when `target_type` is `instance` or `ip`. Does not apply when `target_type` is `lambda`.
        :param pulumi.Input[int] timeout: The amount of time, in seconds, during which no response means a failed health check. For Application Load Balancers, the range is 2 to 120 seconds, and the default is 5 seconds for the `instance` target type and 30 seconds for the `lambda` target type. For Network Load Balancers, you cannot set a custom value, and the default is 10 seconds for TCP and HTTPS health checks and 6 seconds for HTTP health checks.
        :param pulumi.Input[int] unhealthy_threshold: The number of consecutive health check failures required before considering the target unhealthy . For Network Load Balancers, this value must be the same as the `healthy_threshold`. Defaults to 3.
        """
        # Optional fields are only recorded when explicitly provided, so the
        # provider can distinguish "unset" from an explicit value.
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
        if healthy_threshold is not None:
            pulumi.set(__self__, "healthy_threshold", healthy_threshold)
        if interval is not None:
            pulumi.set(__self__, "interval", interval)
        if matcher is not None:
            pulumi.set(__self__, "matcher", matcher)
        if path is not None:
            pulumi.set(__self__, "path", path)
        if port is not None:
            pulumi.set(__self__, "port", port)
        if protocol is not None:
            pulumi.set(__self__, "protocol", protocol)
        if timeout is not None:
            pulumi.set(__self__, "timeout", timeout)
        if unhealthy_threshold is not None:
            pulumi.set(__self__, "unhealthy_threshold", unhealthy_threshold)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicates whether health checks are enabled. Defaults to `true`.
        """
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)

    @property
    @pulumi.getter(name="healthyThreshold")
    def healthy_threshold(self) -> Optional[pulumi.Input[int]]:
        """
        The number of consecutive health checks successes required before considering an unhealthy target healthy. Defaults to 3.
        """
        return pulumi.get(self, "healthy_threshold")

    @healthy_threshold.setter
    def healthy_threshold(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "healthy_threshold", value)

    @property
    @pulumi.getter
    def interval(self) -> Optional[pulumi.Input[int]]:
        """
        The approximate amount of time, in seconds, between health checks of an individual target. Minimum value 5 seconds, Maximum value 300 seconds. For `lambda` target groups, it needs to be greater as the `timeout` of the underlying `lambda`. Default 30 seconds.
        """
        return pulumi.get(self, "interval")

    @interval.setter
    def interval(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "interval", value)

    @property
    @pulumi.getter
    def matcher(self) -> Optional[pulumi.Input[str]]:
        """
        The HTTP codes to use when checking for a successful response from a target. You can specify multiple values (for example, "200,202") or a range of values (for example, "200-299"). Applies to Application Load Balancers only (HTTP/HTTPS), not Network Load Balancers (TCP).
        """
        return pulumi.get(self, "matcher")

    @matcher.setter
    def matcher(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "matcher", value)

    @property
    @pulumi.getter
    def path(self) -> Optional[pulumi.Input[str]]:
        """
        The destination for the health check request. Applies to Application Load Balancers only (HTTP/HTTPS), not Network Load Balancers (TCP).
        """
        return pulumi.get(self, "path")

    @path.setter
    def path(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "path", value)

    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[str]]:
        """
        The port on which targets receive traffic, unless overridden when registering a specific target. Required when `target_type` is `instance` or `ip`. Does not apply when `target_type` is `lambda`.
        """
        return pulumi.get(self, "port")

    @port.setter
    def port(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "port", value)

    @property
    @pulumi.getter
    def protocol(self) -> Optional[pulumi.Input[str]]:
        """
        The protocol to use for routing traffic to the targets. Should be one of "TCP", "TLS", "UDP", "TCP_UDP", "HTTP" or "HTTPS". Required when `target_type` is `instance` or `ip`. Does not apply when `target_type` is `lambda`.
        """
        return pulumi.get(self, "protocol")

    @protocol.setter
    def protocol(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protocol", value)

    @property
    @pulumi.getter
    def timeout(self) -> Optional[pulumi.Input[int]]:
        """
        The amount of time, in seconds, during which no response means a failed health check. For Application Load Balancers, the range is 2 to 120 seconds, and the default is 5 seconds for the `instance` target type and 30 seconds for the `lambda` target type. For Network Load Balancers, you cannot set a custom value, and the default is 10 seconds for TCP and HTTPS health checks and 6 seconds for HTTP health checks.
        """
        return pulumi.get(self, "timeout")

    @timeout.setter
    def timeout(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "timeout", value)

    @property
    @pulumi.getter(name="unhealthyThreshold")
    def unhealthy_threshold(self) -> Optional[pulumi.Input[int]]:
        """
        The number of consecutive health check failures required before considering the target unhealthy . For Network Load Balancers, this value must be the same as the `healthy_threshold`. Defaults to 3.
        """
        return pulumi.get(self, "unhealthy_threshold")

    @unhealthy_threshold.setter
    def unhealthy_threshold(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "unhealthy_threshold", value)
@pulumi.input_type
class TargetGroupStickinessArgs:
    # NOTE(review): the original `enabled` docstring here was swapped with the one
    # in TargetGroupHealthCheckArgs ("Indicates whether health checks are enabled");
    # corrected to describe stickiness itself.
    def __init__(__self__, *,
                 type: pulumi.Input[str],
                 cookie_duration: Optional[pulumi.Input[int]] = None,
                 enabled: Optional[pulumi.Input[bool]] = None):
        """
        Stickiness configuration block for a target group.

        :param pulumi.Input[str] type: The type of sticky sessions. The only current possible values are `lb_cookie` for ALBs and `source_ip` for NLBs.
        :param pulumi.Input[int] cookie_duration: Only used when the type is `lb_cookie`. The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).
        :param pulumi.Input[bool] enabled: Boolean to enable / disable `stickiness`. Default is `true`.
        """
        pulumi.set(__self__, "type", type)
        # Optional fields are only recorded when explicitly provided.
        if cookie_duration is not None:
            pulumi.set(__self__, "cookie_duration", cookie_duration)
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)

    @property
    @pulumi.getter
    def type(self) -> pulumi.Input[str]:
        """
        The type of sticky sessions. The only current possible values are `lb_cookie` for ALBs and `source_ip` for NLBs.
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: pulumi.Input[str]):
        pulumi.set(self, "type", value)

    @property
    @pulumi.getter(name="cookieDuration")
    def cookie_duration(self) -> Optional[pulumi.Input[int]]:
        """
        Only used when the type is `lb_cookie`. The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).
        """
        return pulumi.get(self, "cookie_duration")

    @cookie_duration.setter
    def cookie_duration(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "cookie_duration", value)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Boolean to enable / disable `stickiness`. Default is `true`.
        """
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
| 47.020568
| 574
| 0.675617
| 11,639
| 96,016
| 5.429247
| 0.039007
| 0.093653
| 0.068016
| 0.035179
| 0.89383
| 0.866943
| 0.844851
| 0.828646
| 0.82113
| 0.807963
| 0
| 0.004482
| 0.219182
| 96,016
| 2,041
| 575
| 47.043606
| 0.838391
| 0.348442
| 0
| 0.774297
| 1
| 0
| 0.141109
| 0.072872
| 0
| 0
| 0
| 0
| 0
| 1
| 0.209639
| false
| 0
| 0.004016
| 0.004016
| 0.329317
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
37db4ffeb12eeb9b86aa83ade66bb0de14fcee84
| 155
|
py
|
Python
|
bob/ami/__init__.py
|
intergalactic-software/bob
|
223558be6657d910488704850f5b8db65aeb1295
|
[
"MIT"
] | null | null | null |
bob/ami/__init__.py
|
intergalactic-software/bob
|
223558be6657d910488704850f5b8db65aeb1295
|
[
"MIT"
] | null | null | null |
bob/ami/__init__.py
|
intergalactic-software/bob
|
223558be6657d910488704850f5b8db65aeb1295
|
[
"MIT"
] | 1
|
2020-07-03T16:23:03.000Z
|
2020-07-03T16:23:03.000Z
|
from bob.ami.AMI import *
from bob.ami.FTPServerApp import *
from bob.ami.Hub import *
from bob.ami.NetworkWatcher import *
from bob.ami.TCPServer import *
| 31
| 36
| 0.780645
| 25
| 155
| 4.84
| 0.32
| 0.289256
| 0.413223
| 0.528926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122581
| 155
| 5
| 37
| 31
| 0.889706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
37e4ed92ca3db319b0ca2b956cbbf4bff26e7bfd
| 85,338
|
py
|
Python
|
autotest/ogr/ogr_plscenes.py
|
robe2/gdal
|
78573efe69f1506c112209501068c0b043438295
|
[
"MIT"
] | null | null | null |
autotest/ogr/ogr_plscenes.py
|
robe2/gdal
|
78573efe69f1506c112209501068c0b043438295
|
[
"MIT"
] | null | null | null |
autotest/ogr/ogr_plscenes.py
|
robe2/gdal
|
78573efe69f1506c112209501068c0b043438295
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: PlanetLabs scene driver test suite.
# Author: Even Rouault, even dot rouault at spatialys.com
#
###############################################################################
# Copyright (c) 2015, Planet Labs
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import sys
sys.path.append( '../pymod' )
from osgeo import ogr
from osgeo import gdal
import gdaltest
###############################################################################
# Find PLScenes driver
def ogr_plscenes_1():
    """Probe for the PLScenes OGR driver; report 'success' if present, else 'skip'."""
    gdaltest.plscenes_drv = ogr.GetDriverByName('PLScenes')
    return 'success' if gdaltest.plscenes_drv is not None else 'skip'
###############################################################################
# Various tests on a /vsimem/ "server"
def ogr_plscenes_2():
if gdaltest.plscenes_drv is None:
return 'skip'
gdal.FileFromMemBuffer('/vsimem/root', '{"ortho":"/vsimem/root/ortho/"}')
gdal.FileFromMemBuffer('/vsimem/valid_root_but_invalid_child',
'{"ortho":"/vsimem/valid_root_but_invalid_child/invalid_child/"}')
# Error: no API_KEY
gdal.PushErrorHandler()
gdal.SetConfigOption('PL_URL', '/vsimem/root/')
old_key = gdal.GetConfigOption('PL_API_KEY')
if old_key:
gdal.SetConfigOption('PL_API_KEY', '')
ds = gdal.OpenEx('PLScenes:', gdal.OF_VECTOR)
if old_key:
gdal.SetConfigOption('PL_API_KEY', old_key)
gdal.SetConfigOption('PL_URL', None)
gdal.PopErrorHandler()
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
# Error case
gdal.PushErrorHandler()
gdal.SetConfigOption('PL_URL', '/vsimem/does_not_exist/')
ds = gdal.OpenEx('PLScenes:', gdal.OF_VECTOR, open_options = ['API_KEY=foo'])
gdal.SetConfigOption('PL_URL', None)
gdal.PopErrorHandler()
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
# Error case
gdal.SetConfigOption('PL_URL', '/vsimem/valid_root_but_invalid_child/')
ds = gdal.OpenEx('PLScenes:', gdal.OF_VECTOR, open_options = ['API_KEY=foo'])
gdal.SetConfigOption('PL_URL', None)
gdal.PushErrorHandler()
ret = ds.GetLayer(0).GetFeatureCount()
gdal.PopErrorHandler()
if ret != 0:
gdaltest.post_reason('fail')
return 'fail'
# Error cases
for ortho_json in [ """{}""",
'"valid_json_but_not_a_json_object"',
"""{ invalid_json,""",
"""{ "type": "FeatureCollection" }""",
"""{ "type": "FeatureCollection", "count": -1 }""",
"""{ "type": "FeatureCollection", "count": 0 }""",
"""{ "type": "FeatureCollection", "count": 1 }""",
"""{ "type": "FeatureCollection", "count": 1, "features": [] }""",
]:
gdal.FileFromMemBuffer('/vsimem/root/ortho/?count=1000', ortho_json)
gdal.SetConfigOption('PL_URL', '/vsimem/root/')
ds = gdal.OpenEx('PLScenes:', gdal.OF_VECTOR, open_options = ['API_KEY=foo'])
gdal.SetConfigOption('PL_URL', None)
lyr = ds.GetLayer(0)
gdal.PushErrorHandler()
f = lyr.GetNextFeature()
gdal.PopErrorHandler()
if f:
gdaltest.post_reason('fail')
return 'fail'
gdal.FileFromMemBuffer('/vsimem/root/ortho/?count=1', """{
"count": 2,
}""")
gdal.FileFromMemBuffer('/vsimem/root/ortho/?count=1000', """{
"type": "FeatureCollection",
"count": 2,
"features": [
{
"type": "Feature",
"id": "my_id",
"geometry": {
"coordinates": [ [ [2,49],[2,50],[3,50],[3,49],[2,49] ] ],
"type": "Polygon"
},
"properties": {
"acquired" : "2015-03-27T12:34:56.123+00",
"camera" : {
"bit_depth" : 12,
"color_mode": "RGB"
},
"cloud_cover" : {
"estimated" : 0.25
}
}
}
],
"links": {
"next" : "/vsimem/root/ortho/?count=1000&page=2"
}
}""")
gdal.FileFromMemBuffer('/vsimem/root/ortho/?count=1000&page=2', """{
"type": "FeatureCollection",
"count": 1,
"features": [
{
"type": "Feature",
"id": "my_id2",
"geometry": null,
"properties": {}
}
],
"links": {
"next" : null
}
}""")
my_id_only = """{
"type": "FeatureCollection",
"count": 1,
"features": [
{
"type": "Feature",
"id": "my_id",
"geometry": {
"coordinates": [ [ [2,49],[2,50],[3,50],[3,49],[2,49] ] ],
"type": "Polygon"
},
"properties": {
"acquired" : "2015-03-27T12:34:56.123+00",
"camera" : {
"bit_depth" : 12,
"color_mode": "RGB"
},
"cloud_cover" : {
"estimated" : 0.25
}
}
}
],
"links": {
"next" : null
}
}"""
gdal.FileFromMemBuffer('/vsimem/root/ortho/?count=1&intersects=POINT(2.5%2049.5)',
my_id_only)
gdal.FileFromMemBuffer('/vsimem/root/ortho/?count=1&intersects=POLYGON%20((2%2049,2%2050,3%2050,3%2049,2%2049))',
my_id_only)
gdal.FileFromMemBuffer('/vsimem/root/ortho/?count=1000&intersects=POINT(2.5%2049.5)',
my_id_only)
gdal.FileFromMemBuffer('/vsimem/root/ortho/?count=1000&camera.color_mode.eq=RGB&acquired.gte=2015-03-27T12:34:56&acquired.lt=2015-03-27T12:34:57&cloud_cover.estimated.gt=0.20000000&camera.bit_depth.lte=12&camera.bit_depth.gte=12&camera.bit_depth.lt=13',
my_id_only)
gdal.FileFromMemBuffer('/vsimem/root/ortho/?count=1&camera.color_mode.eq=RGB&acquired.gte=2015-03-27T12:34:56&acquired.lt=2015-03-27T12:34:57&cloud_cover.estimated.gt=0.20000000&camera.bit_depth.lte=12&camera.bit_depth.gte=12&camera.bit_depth.lt=13',
my_id_only)
gdal.SetConfigOption('PL_URL', '/vsimem/root/')
gdal.PushErrorHandler()
ds = gdal.OpenEx('PLScenes:api_key=foo,unsupported_option=val', gdal.OF_VECTOR)
gdal.PopErrorHandler()
gdal.SetConfigOption('PL_URL', None)
if ds is not None or gdal.GetLastErrorMsg().find('Unsupported option') < 0:
gdaltest.post_reason('fail')
print(gdal.GetLastErrorMsg())
return 'fail'
gdal.SetConfigOption('PL_URL', '/vsimem/root/')
ds = gdal.OpenEx('PLScenes:', gdal.OF_VECTOR, open_options = ['API_KEY=foo'])
gdal.SetConfigOption('PL_URL', None)
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
if ds.GetLayerCount() != 1:
gdaltest.post_reason('fail')
return 'fail'
if ds.GetLayer(-1) or ds.GetLayer(1):
gdaltest.post_reason('fail')
return 'fail'
lyr = ds.GetLayerByName('ortho')
if lyr.TestCapability(ogr.OLCFastFeatureCount) != 1 or \
lyr.TestCapability(ogr.OLCStringsAsUTF8) != 1 or \
lyr.TestCapability(ogr.OLCRandomRead) != 0:
gdaltest.post_reason('fail')
return 'fail'
if lyr.GetFeatureCount() != 2:
gdaltest.post_reason('fail')
return 'fail'
ext = lyr.GetExtent()
if ext != (2.0, 3.0, 49.0, 50.0):
gdaltest.post_reason('fail')
print(ext)
return 'fail'
#lyr.ResetReading()
f = lyr.GetNextFeature()
if f.id != 'my_id' or f.acquired != '2015/03/27 12:34:56.123+00' or \
f['cloud_cover.estimated'] != 0.25:
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
if f.GetGeometryRef().ExportToWkt() != 'MULTIPOLYGON (((2 49,2 50,3 50,3 49,2 49)))':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
f = lyr.GetNextFeature()
if f.id != 'my_id2':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
if lyr.GetFeatureCount() != 2:
gdaltest.post_reason('fail')
return 'fail'
f = lyr.GetNextFeature()
if f is not None:
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
lyr.SetSpatialFilterRect(-1000,-1000,1000,1000)
if lyr.GetFeatureCount() != 2:
gdaltest.post_reason('fail')
return 'fail'
lyr.SetSpatialFilterRect(2.5,49.5,2.5,49.5)
f = lyr.GetNextFeature()
if f.id != 'my_id':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
lyr.SetSpatialFilter(None)
# Filter that can be passed to server side
filterstring = "\"camera.color_mode\" = 'RGB' AND acquired = '2015/03/27 12:34:56' AND \"cloud_cover.estimated\" > 0.2 AND \"camera.bit_depth\" <= 12 AND \"camera.bit_depth\" >= 12 AND \"camera.bit_depth\" < 13"
lyr.SetAttributeFilter(filterstring)
if lyr.GetFeatureCount() != 1:
gdaltest.post_reason('fail')
return 'fail'
f = lyr.GetNextFeature()
if f.id != 'my_id':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
f = lyr.GetNextFeature()
if f is not None:
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
# Same but invert GetNextFeature() and GetFeatureCount()
lyr.SetAttributeFilter(filterstring)
f = lyr.GetNextFeature()
if f.id != 'my_id':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
if lyr.GetFeatureCount() != 1:
gdaltest.post_reason('fail')
return 'fail'
f = lyr.GetNextFeature()
if f is not None:
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
# Filter that can be - partly - passed to server side
filterstring = "fid = 1 AND \"camera.color_mode\" = 'RGB' AND acquired = '2015/03/27 12:34:56' AND \"cloud_cover.estimated\" > 0.2 AND \"camera.bit_depth\" <= 12 AND \"camera.bit_depth\" >= 12 AND \"camera.bit_depth\" < 13"
lyr.SetAttributeFilter(filterstring)
if lyr.GetFeatureCount() != 1:
gdaltest.post_reason('fail')
return 'fail'
f = lyr.GetNextFeature()
if f.id != 'my_id':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
f = lyr.GetNextFeature()
if f is not None:
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
# Same but invert GetNextFeature() and GetFeatureCount()
lyr.SetAttributeFilter(filterstring)
f = lyr.GetNextFeature()
if f.id != 'my_id':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
f = lyr.GetNextFeature()
if f is not None:
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
if lyr.GetFeatureCount() != 1:
gdaltest.post_reason('fail')
return 'fail'
# Filter that cannot be passed to server side
filterstring = "fid = 1"
lyr.SetAttributeFilter(filterstring)
if lyr.GetFeatureCount() != 1:
gdaltest.post_reason('fail')
return 'fail'
f = lyr.GetNextFeature()
if f.id != 'my_id':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
# Same but invert GetNextFeature() and GetFeatureCount()
lyr.SetAttributeFilter(filterstring)
f = lyr.GetNextFeature()
if f.id != 'my_id':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
if lyr.GetFeatureCount() != 1:
gdaltest.post_reason('fail')
return 'fail'
# Filter on id
gdal.FileFromMemBuffer('/vsimem/root/ortho/my_id', """{
"type": "Feature",
"id": "my_id",
"geometry": {
"coordinates": [ [ [2,49],[2,50],[3,50],[3,49],[2,49] ] ],
"type": "Polygon"
},
"properties": {
"acquired" : "2015-03-27T12:34:56.123+00",
"camera" : {
"bit_depth" : 12,
"color_mode": "RGB"
},
"cloud_cover" : {
"estimated" : 0.25
}
}
}""")
filterstring = "id = 'my_id'"
lyr.SetAttributeFilter(filterstring)
if lyr.GetFeatureCount() != 1:
gdaltest.post_reason('fail')
return 'fail'
f = lyr.GetNextFeature()
if f.id != 'my_id':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
# Same but invert GetNextFeature() and GetFeatureCount()
lyr.SetAttributeFilter(filterstring)
f = lyr.GetNextFeature()
if f.id != 'my_id':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
if lyr.GetFeatureCount() != 1:
gdaltest.post_reason('fail')
return 'fail'
gdal.ErrorReset()
lyr.SetAttributeFilter("id = 'non_existing_id'")
if lyr.GetFeatureCount() != 0 or gdal.GetLastErrorMsg() != '':
gdaltest.post_reason('fail')
return 'fail'
f = lyr.GetNextFeature()
if f is not None or gdal.GetLastErrorMsg() != '':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
# Unset attribute filter
lyr.SetAttributeFilter(None)
f = lyr.GetNextFeature()
if f.id != 'my_id':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
# Regular ExecuteSQL
sql_lyr = ds.ExecuteSQL("select * from ortho")
f = sql_lyr.GetNextFeature()
if f.id != 'my_id':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
ds.ReleaseResultSet(sql_lyr)
# Test ordered by optimization
gdal.FileFromMemBuffer('/vsimem/root/ortho/?count=1000&order_by=acquired%20asc', """{
"type": "FeatureCollection",
"count": 2,
"features": [
{
"type": "Feature",
"id": "my_id2",
"geometry": null,
"properties": {}
}
],
"links": {
"next" : null
}
}""")
sql_lyr = ds.ExecuteSQL("select * from ortho order by acquired asc")
f = sql_lyr.GetNextFeature()
if f.id != 'my_id2':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
ds.ReleaseResultSet(sql_lyr)
# Test spat option
ds = None
gdal.SetConfigOption('PL_URL', '/vsimem/root/')
ds = gdal.OpenEx('PLScenes:spat=2 49 3 50', gdal.OF_VECTOR, open_options = ['API_KEY=foo'])
gdal.SetConfigOption('PL_URL', None)
lyr = ds.GetLayer(0)
if lyr.GetFeatureCount() != 1:
gdaltest.post_reason('fail')
return 'fail'
lyr.SetSpatialFilterRect(2.5,49.5,2.5,49.5)
if lyr.GetFeatureCount() != 1 and gdal.GetLastErrorMsg().find('GEOS support not enabled') < 0:
gdaltest.post_reason('fail')
return 'fail'
ds = None
gdal.SetConfigOption('PL_URL', '/vsimem/root/')
ds = gdal.OpenEx('PLScenes:spat=2.5 49.5 2.5 49.5', gdal.OF_VECTOR, open_options = ['API_KEY=foo'])
gdal.SetConfigOption('PL_URL', None)
lyr = ds.GetLayer(0)
if lyr.GetFeatureCount() != 1:
gdaltest.post_reason('fail')
return 'fail'
ds = None
gdal.Unlink('/vsimem/root')
gdal.Unlink('/vsimem/valid_root_but_invalid_child')
gdal.Unlink('/vsimem/root/ortho/?count=1')
gdal.Unlink('/vsimem/root/ortho/?count=1000')
gdal.Unlink('/vsimem/root/ortho/?count=1000&page=2')
gdal.Unlink('/vsimem/root/ortho/?count=1&intersects=POINT(2.5%2049.5)')
gdal.Unlink('/vsimem/root/ortho/?count=1&intersects=POLYGON%20((2%2049,2%2050,3%2050,3%2049,2%2049))')
gdal.Unlink('/vsimem/root/ortho/?count=1000&intersects=POINT(2.5%2049.5)')
gdal.Unlink('/vsimem/root/ortho/?count=1&camera.color_mode.eq=RGB&acquired.gte=2015-03-27T12:34:56&acquired.lt=2015-03-27T12:34:57&cloud_cover.estimated.gt=0.20000000&camera.bit_depth.lte=12&camera.bit_depth.gte=12&camera.bit_depth.lt=13')
gdal.Unlink('/vsimem/root/ortho/?count=1000&camera.color_mode.eq=RGB&acquired.gte=2015-03-27T12:34:56&acquired.lt=2015-03-27T12:34:57&cloud_cover.estimated.gt=0.20000000&camera.bit_depth.lte=12&camera.bit_depth.gte=12&camera.bit_depth.lt=13')
gdal.Unlink('/vsimem/root/ortho/?count=1000&order_by=acquired%20asc')
gdal.Unlink('/vsimem/root/ortho/my_id')
return 'success'
###############################################################################
# Raster access on a /vsimem/ "server"
def ogr_plscenes_3():
    """Raster access on a /vsimem/ "server".

    Exercises the PLScenes raster driver against a mocked Planet Labs API
    rooted at /vsimem/root/: first a series of error cases (missing scene,
    invalid scene JSON, scene JSON missing 'properties' / 'links', raster
    file not accessible), then the nominal path for the default (visual),
    'analytic' and 'thumb' product types, including dataset metadata and
    rejection of unsupported connection-string options.

    Returns 'success', 'fail' or 'skip' (gdaltest convention).
    """

    if gdaltest.plscenes_drv is None:
        return 'skip'

    gdal.FileFromMemBuffer('/vsimem/root', '{"ortho":"/vsimem/root/ortho/"}')

    # Error case: missing scene
    gdal.SetConfigOption('PL_URL', '/vsimem/root/')
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    ds = gdal.OpenEx('PLScenes:', gdal.OF_RASTER, open_options=['API_KEY=foo', 'SCENE=not_existing_scene'])
    gdal.PopErrorHandler()
    gdal.SetConfigOption('PL_URL', None)
    if ds is not None:
        gdaltest.post_reason('fail')
        return 'fail'

    # Error case: invalid scene JSon
    gdal.FileFromMemBuffer('/vsimem/root/ortho/my_id', """{""")
    gdal.SetConfigOption('PL_URL', '/vsimem/root/')
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    ds = gdal.OpenEx('PLScenes:', gdal.OF_RASTER, open_options=['API_KEY=foo', 'SCENE=my_id'])
    gdal.PopErrorHandler()
    gdal.SetConfigOption('PL_URL', None)
    if ds is not None:
        gdaltest.post_reason('fail')
        return 'fail'

    # Error case: missing properties.
    gdal.FileFromMemBuffer('/vsimem/root/ortho/my_id', """{}""")
    gdal.SetConfigOption('PL_URL', '/vsimem/root/')
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    ds = gdal.OpenEx('PLScenes:', gdal.OF_RASTER, open_options=['API_KEY=foo', 'SCENE=my_id'])
    gdal.PopErrorHandler()
    gdal.SetConfigOption('PL_URL', None)
    if ds is not None:
        gdaltest.post_reason('fail')
        return 'fail'

    gdal.FileFromMemBuffer('/vsimem/root/ortho/my_id', """{
"type": "Feature",
"id": "my_id",
"geometry": {
"coordinates": [ [ [2,49],[2,50],[3,50],[3,49],[2,49] ] ],
"type": "Polygon"
},
"properties": {
"acquired" : "2015-03-27T12:34:56.123+00",
"camera" : {
"bit_depth" : 12,
"color_mode": "RGB"
},
"cloud_cover" : {
"estimated" : 0.25
}
}
}""")

    # Error case: missing links
    gdal.SetConfigOption('PL_URL', '/vsimem/root/')
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    ds = gdal.OpenEx('PLScenes:', gdal.OF_RASTER, open_options=['API_KEY=foo', 'SCENE=my_id'])
    gdal.PopErrorHandler()
    gdal.SetConfigOption('PL_URL', None)
    if ds is not None:
        gdaltest.post_reason('fail')
        return 'fail'

    gdal.FileFromMemBuffer('/vsimem/root/ortho/my_id', """{
"type": "Feature",
"id": "my_id",
"geometry": {
"coordinates": [ [ [2,49],[2,50],[3,50],[3,49],[2,49] ] ],
"type": "Polygon"
},
"properties": {
"acquired" : "2015-03-27T12:34:56.123+00",
"camera" : {
"bit_depth" : 12,
"color_mode": "RGB"
},
"cloud_cover" : {
"estimated" : 0.25
},
"data": {
"products": {
"visual": {
"full": "/vsimem/root/ortho/my_id/full?product=visual"
},
"analytic": {
"full": "/vsimem/root/ortho/my_id/full?product=analytic"
}
}
},
"links": {
"thumbnail": "/vsimem/root/ortho/my_id/thumb"
}
}
}""")

    # Error case: raster file not accessible
    gdal.SetConfigOption('PL_URL', '/vsimem/root/')
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    ds = gdal.OpenEx('PLScenes:', gdal.OF_RASTER, open_options=['API_KEY=foo', 'SCENE=my_id'])
    gdal.PopErrorHandler()
    gdal.SetConfigOption('PL_URL', None)
    if ds is not None:
        gdaltest.post_reason('fail')
        return 'fail'

    # Now everything ok.  Read the reference raster once, with a context
    # manager: the original code opened byte.tif three times and leaked
    # all three file handles.
    with open('../gcore/data/byte.tif', 'rb') as f:
        byte_tif_data = f.read()
    gdal.FileFromMemBuffer('/vsimem/root/ortho/my_id/full?product=visual',
                           byte_tif_data)
    gdal.FileFromMemBuffer('/vsimem/root/ortho/my_id/full?product=analytic',
                           byte_tif_data)
    gdal.FileFromMemBuffer('/vsimem/root/ortho/my_id/thumb',
                           byte_tif_data)

    # Unsupported option in the connection string must be rejected
    gdal.SetConfigOption('PL_URL', '/vsimem/root/')
    gdal.PushErrorHandler()
    ds = gdal.OpenEx('PLScenes:api_key=foo,scene=my_id,unsupported_option=val', gdal.OF_RASTER)
    gdal.PopErrorHandler()
    gdal.SetConfigOption('PL_URL', None)
    if ds is not None or gdal.GetLastErrorMsg().find('Unsupported option unsupported_option') < 0:
        gdaltest.post_reason('fail')
        print(gdal.GetLastErrorMsg())
        return 'fail'

    # Nominal open with the default (visual) product type
    gdal.SetConfigOption('PL_URL', '/vsimem/root/')
    ds = gdal.OpenEx('PLScenes:', gdal.OF_RASTER, open_options=['API_KEY=foo', 'SCENE=my_id'])
    gdal.SetConfigOption('PL_URL', None)
    if ds is None:
        gdaltest.post_reason('fail')
        return 'fail'
    md = ds.GetMetadata()
    if md['id'] != 'my_id':
        gdaltest.post_reason('fail')
        return 'fail'
    ds = None

    # Nominal open with the analytic product type
    gdal.SetConfigOption('PL_URL', '/vsimem/root/')
    ds = gdal.OpenEx('PLScenes:', gdal.OF_RASTER, open_options=['API_KEY=foo', 'SCENE=my_id', 'PRODUCT_TYPE=analytic'])
    gdal.SetConfigOption('PL_URL', None)
    if ds is None:
        gdaltest.post_reason('fail')
        return 'fail'
    md = ds.GetMetadata()
    if md['id'] != 'my_id':
        gdaltest.post_reason('fail')
        return 'fail'
    ds = None

    # Nominal open with the thumbnail product type
    gdal.SetConfigOption('PL_URL', '/vsimem/root/')
    ds = gdal.OpenEx('PLScenes:', gdal.OF_RASTER, open_options=['API_KEY=foo', 'SCENE=my_id', 'PRODUCT_TYPE=thumb'])
    gdal.SetConfigOption('PL_URL', None)
    if ds is None:
        gdaltest.post_reason('fail')
        return 'fail'
    ds = None

    # Cleanup of all mocked server resources
    gdal.Unlink('/vsimem/root')
    gdal.Unlink('/vsimem/root/ortho/my_id/full?product=visual')
    gdal.Unlink('/vsimem/root/ortho/my_id/full?product=analytic')
    gdal.Unlink('/vsimem/root/ortho/my_id/thumb')
    gdal.Unlink('/vsimem/root/ortho/my_id')

    return 'success'
###############################################################################
# Test accessing non-ortho scene type
def ogr_plscenes_4():
    """Test accessing non-ortho scene type through the PLScenes vector driver.

    The mocked catalog only advertises 'ortho', but 'another_layer' is still
    reachable by name; a second open with a catalog that advertises
    'another_layer' must expose it as layer 0.

    Returns 'success', 'fail' or 'skip' (gdaltest convention).
    """

    if gdaltest.plscenes_drv is None:
        return 'skip'

    gdal.FileFromMemBuffer('/vsimem/root', '{"ortho":"/vsimem/root/ortho/"}')

    # Both page sizes serve the identical single-feature collection.
    page_json = """{
"type": "FeatureCollection",
"count": 1,
"features": [
{
"type": "Feature",
"id": "my_id",
"properties": {
"prop_10": "prop_10",
"prop_1" : "prop_1"
}
}
]
}"""
    gdal.FileFromMemBuffer('/vsimem/root/another_layer/?count=10', page_json)
    gdal.FileFromMemBuffer('/vsimem/root/another_layer/?count=1000', page_json)

    gdal.SetConfigOption('PL_URL', '/vsimem/root/')
    dataset = gdal.OpenEx('PLScenes:', gdal.OF_VECTOR, open_options=['API_KEY=foo'])
    gdal.SetConfigOption('PL_URL', None)
    if dataset is None:
        gdaltest.post_reason('fail')
        return 'fail'
    if dataset.GetLayerCount() != 1:
        gdaltest.post_reason('fail')
        return 'fail'

    # The layer is not in the advertised catalog but can be fetched by name.
    layer = dataset.GetLayerByName('another_layer')
    if layer is None:
        gdaltest.post_reason('fail')
        return 'fail'
    feat = layer.GetNextFeature()
    if feat is None:
        gdaltest.post_reason('fail')
        return 'fail'
    if feat['prop_1'] != 'prop_1' or feat['prop_10'] != 'prop_10':
        gdaltest.post_reason('fail')
        return 'fail'

    # A truly unknown layer name must fail (errors silenced).
    gdal.PushErrorHandler()
    layer = dataset.GetLayerByName('does_not_exist')
    gdal.PopErrorHandler()
    if layer is not None:
        gdaltest.post_reason('fail')
        return 'fail'
    dataset = None

    # Re-open with a catalog that advertises 'another_layer' directly.
    gdal.FileFromMemBuffer('/vsimem/root', '{"another_layer":"/vsimem/root/another_layer/"}')
    gdal.SetConfigOption('PL_URL', '/vsimem/root/')
    dataset = gdal.OpenEx('PLScenes:', gdal.OF_VECTOR, open_options=['API_KEY=foo'])
    gdal.SetConfigOption('PL_URL', None)
    layer = dataset.GetLayer(0)
    feat = layer.GetNextFeature()
    if feat is None:
        gdaltest.post_reason('fail')
        return 'fail'
    if feat['prop_1'] != 'prop_1' or feat['prop_10'] != 'prop_10':
        gdaltest.post_reason('fail')
        return 'fail'
    dataset = None

    for path in ('/vsimem/root',
                 '/vsimem/root/another_layer/?count=10',
                 '/vsimem/root/another_layer/?count=1000'):
        gdal.Unlink(path)

    return 'success'
###############################################################################
# Test V1 API catalog listing with a single catalog
def ogr_plscenes_v1_catalog_no_paging():
    """Test V1 API catalog listing with a single catalog (no paging).

    Returns 'success', 'fail' or 'skip' (gdaltest convention).
    """

    if gdaltest.plscenes_drv is None:
        return 'skip'

    gdal.FileFromMemBuffer('/vsimem/v1/catalogs', '{"_links": {}, "catalogs": [{"count": 2, "_links": { "items": "/vsimem/v1/catalogs/my_catalog/items/", "spec": "/vsimem/v1/catalogs/my_catalog/spec"}, "id": "my_catalog"}]}')

    gdal.SetConfigOption('PL_URL', '/vsimem/v1/catalogs/')
    dataset = gdal.OpenEx('PLScenes:', gdal.OF_VECTOR, open_options=['VERSION=v1', 'API_KEY=foo'])
    gdal.SetConfigOption('PL_URL', None)
    if dataset is None:
        gdaltest.post_reason('fail')
        return 'fail'

    def _missing_layer():
        # Lookup of an unknown catalog name, with errors silenced.
        with gdaltest.error_handler():
            return dataset.GetLayerByName('non_existing')

    # Unknown name must fail both before and after the full listing.
    if _missing_layer() is not None:
        gdaltest.post_reason('fail')
        return 'fail'
    if dataset.GetLayerByName('my_catalog') is None:
        gdaltest.post_reason('fail')
        return 'fail'
    if dataset.GetLayerCount() != 1:
        gdaltest.post_reason('fail')
        return 'fail'
    if _missing_layer() is not None:
        gdaltest.post_reason('fail')
        return 'fail'

    gdal.Unlink('/vsimem/v1/catalogs')

    return 'success'
###############################################################################
# Test V1 API catalog listing with catalog paging
def ogr_plscenes_v1_catalog_paging():
    """Test V1 API catalog listing with catalog paging across three pages.

    Returns 'success', 'fail' or 'skip' (gdaltest convention).
    """

    if gdaltest.plscenes_drv is None:
        return 'skip'

    gdal.FileFromMemBuffer('/vsimem/v1/catalogs', '{"_links": { "_next" : "/vsimem/v1/catalogs_page_2"}, "catalogs": [{"count": 2, "_links": { "items": "/vsimem/v1/catalogs/my_catalog/items/", "spec": "/vsimem/v1/catalogs/my_catalog/spec"}, "id": "my_catalog"}]}')
    gdal.FileFromMemBuffer('/vsimem/v1/catalogs_page_2', '{ "_links": { "_next" : "/vsimem/v1/catalogs_page_3"}, "catalogs": [{"count": 2, "_links": { "items": "/vsimem/v1/catalogs/my_catalog_2/items/", "spec": "/vsimem/v1/catalogs/my_catalog_2/spec"}, "id": "my_catalog_2"}]}')
    gdal.FileFromMemBuffer('/vsimem/v1/catalogs_page_3', '{ "catalogs": [{"count": 2, "_links": { "items": "/vsimem/v1/catalogs/my_catalog_3/items/", "spec": "/vsimem/v1/catalogs/my_catalog_3/spec"}, "id": "my_catalog_3"}]}')

    gdal.SetConfigOption('PL_URL', '/vsimem/v1/catalogs/')
    dataset = gdal.OpenEx('PLScenes:', gdal.OF_VECTOR, open_options=['VERSION=v1', 'API_KEY=foo'])
    gdal.SetConfigOption('PL_URL', None)
    if dataset is None:
        gdaltest.post_reason('fail')
        return 'fail'

    def _missing_layer():
        # Lookup of an unknown catalog name, with errors silenced.
        with gdaltest.error_handler():
            return dataset.GetLayerByName('non_existing')

    if _missing_layer() is not None:
        gdaltest.post_reason('fail')
        return 'fail'
    if dataset.GetLayerByName('my_catalog') is None:
        gdaltest.post_reason('fail')
        return 'fail'

    # Direct by-name access to a catalog that only appears on page 2.
    gdal.FileFromMemBuffer('/vsimem/v1/catalogs/my_catalog_2', '{ "_links": { "items": "/vsimem/v1/catalogs/my_catalog_2/items/", "spec": "/vsimem/v1/catalogs/my_catalog_2/spec"}, "id": "my_catalog_2"} }')
    if dataset.GetLayerByName('my_catalog_2') is None:
        gdaltest.post_reason('fail')
        return 'fail'

    # Full enumeration must walk all three pages.
    if dataset.GetLayerCount() != 3:
        gdaltest.post_reason('fail')
        return 'fail'
    if dataset.GetLayerByName('my_catalog_2') is None:
        gdaltest.post_reason('fail')
        return 'fail'
    if dataset.GetLayerByName('my_catalog_3') is None:
        gdaltest.post_reason('fail')
        return 'fail'
    if _missing_layer() is not None:
        gdaltest.post_reason('fail')
        return 'fail'

    for path in ('/vsimem/v1/catalogs',
                 '/vsimem/v1/catalogs_page_2',
                 '/vsimem/v1/catalogs_page_3',
                 '/vsimem/v1/catalogs/my_catalog_2'):
        gdal.Unlink(path)

    return 'success'
###############################################################################
# Test V1 API
def ogr_plscenes_v1_nominal():
if gdaltest.plscenes_drv is None:
return 'skip'
gdal.FileFromMemBuffer('/vsimem/v1/catalogs',
"""{"_links": {}, "catalogs": [
{"count": 2,
"_links": { "items": "/vsimem/v1/catalogs/my_catalog/items/", "spec": "/vsimem/v1/catalogs/my_catalog/spec"},
"asset_categories": {"analytic": {"description": "", "id": "analytic", "name": "Analytic Products"}, "visual": {"description": "", "id": "visual", "name": "Visual Products"}},
"id": "my_catalog"}
]}""")
gdal.SetConfigOption('PL_URL', '/vsimem/v1/catalogs/')
ds = gdal.OpenEx('PLScenes:', gdal.OF_VECTOR, open_options = ['VERSION=v1', 'API_KEY=foo'])
gdal.SetConfigOption('PL_URL', None)
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
gdal.SetConfigOption('PL_URL', '/vsimem/v1/catalogs/')
ds = gdal.OpenEx('PLScenes:version=v1,api_key=foo', gdal.OF_VECTOR)
gdal.SetConfigOption('PL_URL', None)
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
lyr = ds.GetLayer(0)
if lyr.GetName() != 'my_catalog':
gdaltest.post_reason('fail')
return 'fail'
if lyr.TestCapability(ogr.OLCFastFeatureCount) != 1 or \
lyr.TestCapability(ogr.OLCStringsAsUTF8) != 1 or \
lyr.TestCapability(ogr.OLCRandomRead) != 0:
gdaltest.post_reason('fail')
return 'fail'
if lyr.GetFeatureCount() != 2:
gdaltest.post_reason('fail')
return 'fail'
if lyr.GetGeomType() != ogr.wkbMultiPolygon:
gdaltest.post_reason('fail')
return 'fail'
ext = lyr.GetExtent()
if ext != (-180.0, 180.0, -90.0, 90.0):
gdaltest.post_reason('fail')
print(ext)
return 'fail'
gdal.FileFromMemBuffer('/vsimem/v1/catalogs/my_catalog/spec',
"""{
"paths": {
"/catalogs/my_catalog/items/" : {
"get": {
"responses": {
"200": {
"schema": {
"$ref": "#/definitions/ItemPage"
}
}
},
"parameters": [
{
"$ref":"#/parameters/qFloat"
},
{
"$ref":"#/parameters/qCreated"
},
{
"$ref":"#/parameters/qInt32"
},
{
"$ref":"#/parameters/qString"
}
]
}
}
},
"parameters": {
"qFloat":{
"in": "query",
"name": "catalog::float"
},
"qCreated":{
"in": "query",
"name": "created"
},
"qInt32":{
"in": "query",
"name": "catalog::int32"
},
"qString":{
"in": "query",
"name": "catalog::string"
}
},
"definitions": {
"ItemPage": {
"type": "object",
"allOf": [
{
"$ref": "#/definitions/GeoJSONFeatureCollection"
},
{
"type": "object",
"properties": {
"_links": {
"$ref": "#/definitions/PageLinks"
},
"features": {
"items": {
"$ref": "#/definitions/Item"
},
"type": "array"
}
}
}
]
},
"Item": {
"allOf": [
{
"$ref": "#/definitions/GeoJSONFeature"
},
{
"properties": {
"_embeds": {
"$ref": "#/definitions/ItemEmbeds"
},
"_links": {
"$ref": "#/definitions/ItemLinks"
},
"id": {
"type": "string"
},
"properties": {
"$ref": "#/definitions/ItemProperties"
}
}
}
]
},
"ItemEmbeds": {
"properties": {
"assets": {
"additionalProperties": {
"$ref": "#/definitions/ItemAsset"
},
"type": "object"
}
},
"type": "object"
},
"ItemAsset": {
"properties": {
"_links": {
"$ref": "#/definitions/SelfLink"
},
"category_id": {
"description": "Category identifier of this ItemAsset.",
"type": "string"
},
"file": {
"description": "RFC 3986 URI representing a location that will either directly serve the underlying asset data, or redirect to a location that will. A client must never attempt to construct this URI, as only its behavior is governed by this specification, not its location. In the event that a 202 is returned from a GET request against this URI, the response's `X-Retry-After` header indicates how long the client should wait before reattempting the request.",
"type": "string"
},
"mimetype": {
"description": "The MIME type of the underlying asset file.",
"type": "string"
}
}
},
"ItemLinks": {
"allOf": [
{
"$ref": "#/definitions/SelfLink"
},
{
"type": "object",
"properties": {
"assets": {
"type": "string",
}
}
}
]
},
"SelfLink": {
"type": "object",
"properties": {
"_self": {
"type": "string",
}
}
},
"ItemProperties": {
"allOf": [
{
"$ref": "#/definitions/CoreItemProperties"
},
{
"$ref": "#/definitions/ExtraItemProperties"
}
]
},
"CoreItemProperties": {
"required": [
"created"
],
"type": "object",
"properties": {
"created": {
"type": "string",
"format": "date-time"
}
}
},
"ExtraItemProperties": {
"type": "object",
"properties": {
"catalog::float": {
"format": "float",
"type": "number",
},
"catalog::string": {
"type": "string",
},
"catalog::int32": {
"format": "int32",
"type": "integer",
},
"catalog::int64": {
"format": "int64",
"type": "integer",
}
}
}
}
}
""")
expected_md = """{
"id":{
"type":"string",
"src_field":"id",
"server_queryable":true
},
"self_link":{
"type":"string",
"src_field":"_links._self",
"server_queryable":false
},
"assets_link":{
"type":"string",
"src_field":"_links.assets",
"server_queryable":false
},
"created":{
"type":"string",
"format":"date-time",
"src_field":"properties.created",
"server_queryable":true
},
"float":{
"format":"float",
"type":"number",
"src_field":"properties.catalog::float",
"server_queryable":true
},
"string":{
"type":"string",
"src_field":"properties.catalog::string",
"server_queryable":true
},
"int32":{
"format":"int32",
"type":"integer",
"src_field":"properties.catalog::int32",
"server_queryable":true
},
"int64":{
"format":"int64",
"type":"integer",
"src_field":"properties.catalog::int64",
"server_queryable":false
},
"asset_analytic_self_link":{
"description":"RFC 3986 URI representing the canonical location of this asset.",
"type":"string",
"src_field":"_embeds.assets.analytic._links._self",
"server_queryable":false
},
"asset_analytic_file":{
"description":"RFC 3986 URI representing a location that will either directly serve the underlying asset data, or redirect to a location that will. A client must never attempt to construct this URI, as only its behavior is governed by this specification, not its location. In the event that a 202 is returned from a GET request against this URI, the response's `X-Retry-After` header indicates how long the client should wait before reattempting the request.",
"type":"string",
"src_field":"_embeds.assets.analytic.file",
"server_queryable":false
},
"asset_analytic_mimetype":{
"description":"The MIME type of the underlying asset file.",
"type":"string",
"src_field":"_embeds.assets.analytic.mimetype",
"server_queryable":false
},
"asset_visual_self_link":{
"description":"RFC 3986 URI representing the canonical location of this asset.",
"type":"string",
"src_field":"_embeds.assets.visual._links._self",
"server_queryable":false
},
"asset_visual_file":{
"description":"RFC 3986 URI representing a location that will either directly serve the underlying asset data, or redirect to a location that will. A client must never attempt to construct this URI, as only its behavior is governed by this specification, not its location. In the event that a 202 is returned from a GET request against this URI, the response's `X-Retry-After` header indicates how long the client should wait before reattempting the request.",
"type":"string",
"src_field":"_embeds.assets.visual.file",
"server_queryable":false
},
"asset_visual_mimetype":{
"description":"The MIME type of the underlying asset file.",
"type":"string",
"src_field":"_embeds.assets.visual.mimetype",
"server_queryable":false
}
}"""
md = lyr.GetMetadataItem('FIELDS_DESCRIPTION')
if md != expected_md:
gdaltest.post_reason('fail')
print(md)
return 'fail'
gdal.SetConfigOption('PL_URL', '/vsimem/v1/catalogs/')
ds = gdal.OpenEx('PLScenes:', gdal.OF_VECTOR, open_options = ['VERSION=v1', 'API_KEY=foo'])
gdal.SetConfigOption('PL_URL', None)
lyr = ds.GetLayer(0)
md = lyr.GetMetadata()['FIELDS_DESCRIPTION']
if md != expected_md:
gdaltest.post_reason('fail')
print(md)
return 'fail'
gdal.SetConfigOption('PL_URL', '/vsimem/v1/catalogs/')
ds = gdal.OpenEx('PLScenes:', gdal.OF_VECTOR, open_options = ['VERSION=v1', 'API_KEY=foo'])
gdal.SetConfigOption('PL_URL', None)
lyr = ds.GetLayer(0)
field_count = lyr.GetLayerDefn().GetFieldCount()
if field_count != 14:
gdaltest.post_reason('fail')
print(field_count)
return 'fail'
gdal.FileFromMemBuffer('/vsimem/v1/catalogs/my_catalog/items/?_embeds=features.*.assets&_page_size=1000',
"""{
"_links":
{
"_next": "/vsimem/v1/catalogs/my_catalog/items_page2/?_embeds=features.*.assets&_page_size=1000"
},
"features" : [
{
"id": "id",
"_links" : {
"_self" : "self",
"assets" : "assets"
},
"_embeds" : {
"assets": {
"visual" : {
"_links": {
"_self": "visual_links_self"
},
"file": "/vsimem/v1/catalogs/my_catalog/items/id/assets/visual/download",
"mimetype": "visual_mimetype"
}
}
},
"properties": {
"created" : "2016-02-11T12:34:56.789Z",
"catalog::float": 1.23,
"catalog::string": "string",
"catalog::int32": 123,
"catalog::int64": 1234567890123
},
"geometry":
{
"type": "Polygon",
"coordinates" : [ [ [2,49],[2,49.1],[2.1,49.1],[2.1,49],[2,49] ] ]
}
}
]
}""")
gdal.SetConfigOption('PL_URL', '/vsimem/v1/catalogs/')
ds = gdal.OpenEx('PLScenes:', gdal.OF_VECTOR, open_options = ['VERSION=v1', 'API_KEY=foo'])
gdal.SetConfigOption('PL_URL', None)
lyr = ds.GetLayer(0)
f = lyr.GetNextFeature()
if f.GetFID() != 1 or f['id'] != 'id' or f['self_link'] != 'self' or f['assets_link'] != 'assets' or \
f['created'] != '2016/02/11 12:34:56.789+00' or \
f['float'] != 1.23 or f['string'] != 'string' or f['int32'] != 123 or \
f['int64'] != 1234567890123 or \
f['asset_visual_self_link'] != 'visual_links_self' or \
f['asset_visual_file'] != '/vsimem/v1/catalogs/my_catalog/items/id/assets/visual/download' or \
f['asset_visual_mimetype'] != 'visual_mimetype' or \
f.GetGeometryRef().ExportToWkt() != 'MULTIPOLYGON (((2 49,2.0 49.1,2.1 49.1,2.1 49.0,2 49)))':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
lyr.ResetReading()
f = lyr.GetNextFeature()
if f.GetFID() != 1:
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
# Try raster access
# Missing catalog
gdal.SetConfigOption('PL_URL', '/vsimem/v1/catalogs/')
with gdaltest.error_handler():
ds_raster = gdal.OpenEx('PLScenes:', gdal.OF_RASTER, open_options = ['VERSION=v1', 'API_KEY=foo', 'SCENE=id'])
gdal.SetConfigOption('PL_URL', None)
if ds_raster is not None or gdal.GetLastErrorMsg().find('Missing catalog') < 0:
gdaltest.post_reason('fail')
return 'fail'
# Invalid catalog
gdal.SetConfigOption('PL_URL', '/vsimem/v1/catalogs/')
with gdaltest.error_handler():
ds_raster = gdal.OpenEx('PLScenes:', gdal.OF_RASTER, open_options = ['VERSION=v1', 'API_KEY=foo', 'CATALOG=invalid', 'SCENE=id'])
gdal.SetConfigOption('PL_URL', None)
# visual not an object
gdal.FileFromMemBuffer('/vsimem/v1/catalogs/my_catalog/items/id/assets',
"""{ "visual": false }""")
gdal.SetConfigOption('PL_URL', '/vsimem/v1/catalogs/')
with gdaltest.error_handler():
ds_raster = gdal.OpenEx('PLScenes:', gdal.OF_RASTER, open_options = ['VERSION=v1', 'API_KEY=foo', 'CATALOG=my_catalog', 'SCENE=id'])
gdal.SetConfigOption('PL_URL', None)
if ds_raster is not None:
gdaltest.post_reason('fail')
return 'fail'
# Missing file
gdal.FileFromMemBuffer('/vsimem/v1/catalogs/my_catalog/items/id/assets',
"""{ "visual": { } }""")
gdal.SetConfigOption('PL_URL', '/vsimem/v1/catalogs/')
with gdaltest.error_handler():
ds_raster = gdal.OpenEx('PLScenes:', gdal.OF_RASTER, open_options = ['VERSION=v1', 'API_KEY=foo', 'CATALOG=my_catalog', 'SCENE=id'])
gdal.SetConfigOption('PL_URL', None)
if ds_raster is not None:
gdaltest.post_reason('fail')
return 'fail'
# Empty file
gdal.FileFromMemBuffer('/vsimem/v1/catalogs/my_catalog/items/id/assets',
"""{ "visual": { "file": "" } }""")
gdal.SetConfigOption('PL_URL', '/vsimem/v1/catalogs/')
with gdaltest.error_handler():
ds_raster = gdal.OpenEx('PLScenes:', gdal.OF_RASTER, open_options = ['VERSION=v1', 'API_KEY=foo', 'CATALOG=my_catalog', 'SCENE=id'])
gdal.SetConfigOption('PL_URL', None)
if ds_raster is not None:
gdaltest.post_reason('fail')
return 'fail'
gdal.FileFromMemBuffer('/vsimem/v1/catalogs/my_catalog/items/id/assets',
"""{ "visual": { "file": "/vsimem/v1/catalogs/my_catalog/items/id/assets/visual/download"} }""")
# Missing /vsimem/v1/catalogs/my_catalog/items/id/assets/visual/download
gdal.SetConfigOption('PL_URL', '/vsimem/v1/catalogs/')
with gdaltest.error_handler():
ds_raster = gdal.OpenEx('PLScenes:', gdal.OF_RASTER, open_options = ['VERSION=v1', 'API_KEY=foo', 'CATALOG=my_catalog', 'SCENE=id'])
gdal.SetConfigOption('PL_URL', None)
if ds_raster is not None or gdal.GetLastErrorMsg().find('The generation of the product is in progress. Retry later') < 0:
gdaltest.post_reason('fail')
return 'fail'
# JSon content for /vsimem/v1/catalogs/my_catalog/items/id/assets/visual/download
gdal.FileFromMemBuffer('/vsimem/v1/catalogs/my_catalog/items/id/assets/visual/download',
"""{}""")
gdal.SetConfigOption('PL_URL', '/vsimem/v1/catalogs/')
with gdaltest.error_handler():
ds_raster = gdal.OpenEx('PLScenes:', gdal.OF_RASTER, open_options = ['VERSION=v1', 'API_KEY=foo', 'CATALOG=my_catalog', 'SCENE=id'])
gdal.SetConfigOption('PL_URL', None)
if ds_raster is not None:
gdaltest.post_reason('fail')
return 'fail'
# Missing /vsimem/v1/catalogs/my_catalog
gdal.FileFromMemBuffer('/vsimem/v1/catalogs/my_catalog/items/id/assets/visual/download',
open('../gcore/data/byte.tif', 'rb').read())
gdal.SetConfigOption('PL_URL', '/vsimem/v1/catalogs/')
with gdaltest.error_handler():
ds_raster = gdal.OpenEx('PLScenes:', gdal.OF_RASTER, open_options = ['VERSION=v1', 'API_KEY=foo', 'CATALOG=my_catalog', 'SCENE=id'])
gdal.SetConfigOption('PL_URL', None)
if ds_raster is None:
gdaltest.post_reason('fail')
return 'fail'
# Failed filter by scene id
gdal.FileFromMemBuffer('/vsimem/v1/catalogs/my_catalog',
"""{"count": 2,
"_links": { "items": "/vsimem/v1/catalogs/my_catalog/items/", "spec": "/vsimem/v1/catalogs/my_catalog/spec"},
"asset_categories": {"analytic": {"description": "", "id": "analytic", "name": "Analytic Products"}, "visual": {"description": "", "id": "visual", "name": "Visual Products"}},
"id": "my_catalog"}""")
gdal.SetConfigOption('PL_URL', '/vsimem/v1/catalogs/')
ds_raster = gdal.OpenEx('PLScenes:', gdal.OF_RASTER, open_options = ['VERSION=v1', 'API_KEY=foo', 'CATALOG=my_catalog', 'SCENE=id'])
gdal.SetConfigOption('PL_URL', None)
if ds_raster is None:
gdaltest.post_reason('fail')
return 'fail'
ds_raster = None
# Test metadata items attached to dataset
gdal.FileFromMemBuffer('/vsimem/v1/catalogs/my_catalog/items/id?_embeds=assets',
"""{
"id": "id",
"properties": {
"catalog::int32": 123,
},
}""")
gdal.SetConfigOption('PL_URL', '/vsimem/v1/catalogs/')
ds_raster = gdal.OpenEx('PLScenes:', gdal.OF_RASTER, open_options = ['VERSION=v1', 'API_KEY=foo', 'CATALOG=my_catalog', 'PRODUCT_TYPE=visual', 'SCENE=id'])
gdal.SetConfigOption('PL_URL', None)
if ds_raster is None:
gdaltest.post_reason('fail')
return 'fail'
if ds_raster.GetMetadataItem('int32') != '123':
gdaltest.post_reason('fail')
return 'fail'
ds_raster = None
# Test invalid product_type
gdal.SetConfigOption('PL_URL', '/vsimem/v1/catalogs/')
with gdaltest.error_handler():
ds_raster = gdal.OpenEx('PLScenes:', gdal.OF_RASTER, open_options = ['VERSION=v1', 'API_KEY=foo', 'CATALOG=my_catalog', 'PRODUCT_TYPE=invalid', 'SCENE=id'])
gdal.SetConfigOption('PL_URL', None)
if ds_raster is not None:
gdaltest.post_reason('fail')
return 'fail'
# Test subdatasets
gdal.SetConfigOption('PL_URL', '/vsimem/v1/catalogs/')
ds_raster = gdal.OpenEx('PLScenes:', gdal.OF_RASTER, open_options = ['VERSION=v1', 'API_KEY=foo', 'CATALOG=my_catalog', 'PRODUCT_TYPE=list', 'SCENE=id'])
gdal.SetConfigOption('PL_URL', None)
if len(ds_raster.GetSubDatasets()) != 1:
gdaltest.post_reason('fail')
return 'fail'
ds_raster = None
# Unsupported option
gdal.SetConfigOption('PL_URL', '/vsimem/v1/catalogs/')
with gdaltest.error_handler():
ds_raster = gdal.OpenEx('PLScenes:unsupported=yes', gdal.OF_RASTER, open_options = ['VERSION=v1', 'API_KEY=foo', 'CATALOG=my_catalog', 'SCENE=id'])
gdal.SetConfigOption('PL_URL', None)
if ds_raster is not None:
gdaltest.post_reason('fail')
return 'fail'
# Test catalog with vector access
gdal.SetConfigOption('PL_URL', '/vsimem/v1/catalogs/')
ds2 = gdal.OpenEx('PLScenes:', gdal.OF_VECTOR, open_options = ['VERSION=v1', 'API_KEY=foo', 'CATALOG=my_catalog'])
gdal.SetConfigOption('PL_URL', None)
if ds2 is None or ds2.GetLayerCount() != 1:
gdaltest.post_reason('fail')
return 'fail'
gdal.SetConfigOption('PL_URL', '/vsimem/v1/catalogs/')
with gdaltest.error_handler():
ds2 = gdal.OpenEx('PLScenes:', gdal.OF_VECTOR, open_options = ['VERSION=v1', 'API_KEY=foo', 'CATALOG=invalid'])
gdal.SetConfigOption('PL_URL', None)
if ds2 is not None:
gdaltest.post_reason('fail')
return 'fail'
gdal.Unlink('/vsimem/v1/catalogs/my_catalog')
gdal.Unlink('/vsimem/v1/catalogs/my_catalog/items/id/assets')
gdal.Unlink('/vsimem/v1/catalogs/my_catalog/items/id/assets/visual/download')
gdal.Unlink('/vsimem/v1/catalogs/my_catalog/items/id?_embeds=assets')
gdal.FileFromMemBuffer('/vsimem/v1/catalogs/my_catalog/items_page2/?_embeds=features.*.assets&_page_size=1000',
"""{
"features" : [
{
"id": "id2"
}
]
}""")
f = lyr.GetNextFeature()
if f.GetFID() != 2 or f['id'] != 'id2':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
lyr.ResetReading()
f = lyr.GetNextFeature()
if f.GetFID() != 1:
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
f = lyr.GetNextFeature()
if f.GetFID() != 2:
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
f = lyr.GetNextFeature()
if f is not None:
gdaltest.post_reason('fail')
return 'fail'
f = lyr.GetNextFeature()
if f is not None:
gdaltest.post_reason('fail')
return 'fail'
gdal.FileFromMemBuffer('/vsimem/v1/catalogs/my_catalog/items/?_embeds=features.*.assets&_page_size=1000&geometry=POINT(2%2049)',
"""{
"features" : [
{
"id": "id3"
}
]
}""")
# POINT spatial filter
lyr.SetSpatialFilterRect(2,49,2,49)
f = lyr.GetNextFeature()
if f['id'] != 'id3':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
gdal.FileFromMemBuffer('/vsimem/v1/catalogs/my_catalog/items/?_page_size=0&geometry=POINT(2%2049)',
"""{ "_result_count": 123456 }""")
if lyr.GetFeatureCount() != 123456:
gdaltest.post_reason('fail')
return 'fail'
gdal.FileFromMemBuffer('/vsimem/v1/catalogs/my_catalog/items/?_embeds=features.*.assets&_page_size=1000&geometry=POLYGON%20((2%2049,2.0%2049.1,2.1%2049.1,2.1%2049.0,2%2049))',
"""{
"features" : [
{
"id": "id4"
}
]
}""")
# POLYGON spatial filter
lyr.SetSpatialFilterRect(2,49,2.1,49.1)
f = lyr.GetNextFeature()
if f['id'] != 'id4':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
# Reset spatial filter
lyr.SetSpatialFilter(0, None)
f = lyr.GetNextFeature()
if f['id'] != 'id':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
# All world filter
lyr.SetSpatialFilterRect(-1000,-1000,1000,1000)
f = lyr.GetNextFeature()
if f['id'] != 'id':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
# Reset spatial filter
lyr.SetSpatialFilter(None)
f = lyr.GetNextFeature()
if f['id'] != 'id':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
# Test attribute filter on id (special case)
lyr.SetAttributeFilter("id = 'filtered_id'")
gdal.FileFromMemBuffer('/vsimem/v1/catalogs/my_catalog/items/filtered_id?_embeds=assets',
"""{
"id": "filtered_id",
"properties": {}
}""")
f = lyr.GetNextFeature()
if f['id'] != 'filtered_id':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
# Test attribute filter fully evaluated on server side.
lyr.SetAttributeFilter("float >= 0 AND int32 < 3 AND float <= 1 AND string = 'foo' AND int32 > 1 AND created = '2016/02/11 12:34:56'")
gdal.FileFromMemBuffer('/vsimem/v1/catalogs/my_catalog/items/?_embeds=features.*.assets&_page_size=1000&created=[2016-02-11T12:34:56Z:2016-02-11T12:34:57Z]&catalog::float=[0.00000000:1.00000000]&catalog::string=foo&catalog::int32=[1:3]',
"""{
"features" : [
{
"id": "filtered_0",
"properties": {
"catalog::float": 0.5,
"catalog::int32" : 3,
"catalog::string": "foo",
"created": "2016-02-11T12:34:56.789Z"
}
},
{
"id": "filtered_1",
"properties": {
"catalog::float": 0.5,
"catalog::int32" : 2,
"catalog::string": "foo",
"created": "2016-02-11T12:34:56.789Z"
}
}
]
}""")
f = lyr.GetNextFeature()
if f['id'] != 'filtered_1':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
# Another one but with no range
lyr.SetAttributeFilter("float > 0 AND int32 < 3 AND created > '2016/02/11 12:34:56'")
gdal.FileFromMemBuffer('/vsimem/v1/catalogs/my_catalog/items/?_embeds=features.*.assets&_page_size=1000&created=[2016-02-11T12:34:56Z:]&catalog::float=[0.00000000:]&catalog::int32=[:3]',
"""{
"features" : [
{
"id": "filtered_2",
"properties": {
"catalog::float": 0.5,
"catalog::int32" : 2,
"catalog::string": "foo",
"created": "2016-02-11T12:34:56.789Z"
}
}
]
}""")
f = lyr.GetNextFeature()
if f['id'] != 'filtered_2':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
# Partly server / partly client
lyr.SetAttributeFilter("int64 = 4 AND string = 'foo' AND int32 >= 3 AND int32 >= 3 AND float >= 0 AND float <= 2 AND float <= 3")
gdal.FileFromMemBuffer('/vsimem/v1/catalogs/my_catalog/items/?_embeds=features.*.assets&_page_size=1000&catalog::float=[:3.00000000]&catalog::string=foo&catalog::int32=[3:]',
"""{
"features" : [
{
"id": "filtered_3",
"properties": {
"catalog::int64" : 4,
"catalog::int32" : 4,
"catalog::float" : 1,
"catalog::string": "foo",
}
}
]
}""")
f = lyr.GetNextFeature()
if f['id'] != 'filtered_3':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
# Completely client side
lyr.SetAttributeFilter("int32 = 123 OR string = 'foo'")
f = lyr.GetNextFeature()
if f['id'] != 'id':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
# Reset attribute filter
lyr.SetAttributeFilter(None)
f = lyr.GetNextFeature()
if f['id'] != 'id':
gdaltest.post_reason('fail')
f.DumpReadable()
return 'fail'
gdal.Unlink('/vsimem/v1/catalogs')
gdal.Unlink('/vsimem/v1/catalogs/my_catalog/spec')
gdal.Unlink('/vsimem/v1/catalogs/my_catalog/items/?_embeds=features.*.assets&_page_size=1000')
gdal.Unlink('/vsimem/v1/catalogs/my_catalog/items_page2/?_embeds=features.*.assets&_page_size=1000')
gdal.Unlink('/vsimem/v1/catalogs/my_catalog/items/?_embeds=features.*.assets&_page_size=1000&geometry=POINT(2%2049)')
gdal.Unlink('/vsimem/v1/catalogs/my_catalog/items/?_embeds=features.*.assets&_page_size=1000&geometry=POLYGON%20((2%2049,2.0%2049.1,2.1%2049.1,2.1%2049.0,2%2049))')
gdal.Unlink('/vsimem/v1/catalogs/my_catalog/items/filtered_id?_embeds=assets')
gdal.Unlink('/vsimem/v1/catalogs/my_catalog/items/?_embeds=features.*.assets&_page_size=1000&created=[2016-02-11T12:34:56Z:2016-02-11T12:34:57Z]&catalog::float=[0.00000000:1.00000000]&catalog::string=foo&catalog::int32=[1:3]')
gdal.Unlink('/vsimem/v1/catalogs/my_catalog/items/?_embeds=features.*.assets&_page_size=1000&created=[2016-02-11T12:34:56Z:]&catalog::float=[0.00000000:]&catalog::int32=[:3]')
gdal.Unlink('/vsimem/v1/catalogs/my_catalog/items/?_embeds=features.*.assets&_page_size=1000&catalog::float=[:3.00000000]&catalog::string=foo&catalog::int32=[3:]')
gdal.Unlink('/vsimem/v1/catalogs/my_catalog/items/?_page_size=0&geometry=POINT(2%2049)')
return 'success'
###############################################################################
# Test robustness to errors in V1 API
def ogr_plscenes_v1_errors():
    """Exercise the PLScenes V1 driver against malformed server responses.

    Serves deliberately broken JSON documents through /vsimem/ and checks
    that opening the datasource either fails cleanly or degrades gracefully
    (no crash, empty layer definitions).  Returns 'success', 'fail' or
    'skip' per the gdaltest convention.
    """
    if gdaltest.plscenes_drv is None:
        return 'skip'

    # No PL_API_KEY: open must fail.  Temporarily clear any real key and
    # restore it afterwards so other tests are unaffected.
    gdal.SetConfigOption('PL_URL', '/vsimem/v1/catalogs/')
    old_key = gdal.GetConfigOption('PL_API_KEY')
    if old_key:
        gdal.SetConfigOption('PL_API_KEY', '')
    with gdaltest.error_handler():
        ds = gdal.OpenEx('PLScenes:', gdal.OF_VECTOR, open_options = ['VERSION=v1'])
    if old_key:
        gdal.SetConfigOption('PL_API_KEY', old_key)
    gdal.SetConfigOption('PL_URL', None)
    if ds is not None:
        gdaltest.post_reason('fail')
        return 'fail'

    # Invalid option in the connection string
    gdal.FileFromMemBuffer('/vsimem/v1/catalogs', '{ "catalogs": [] }')
    with gdaltest.error_handler():
        gdal.SetConfigOption('PL_URL', '/vsimem/v1/catalogs/')
        ds = gdal.OpenEx('PLScenes:version=v1,api_key=foo,invalid=invalid', gdal.OF_VECTOR)
        gdal.SetConfigOption('PL_URL', None)
    if ds is not None:
        gdaltest.post_reason('fail')
        return 'fail'

    # Invalid JSON
    gdal.FileFromMemBuffer('/vsimem/v1/catalogs', '{invalid_json')
    with gdaltest.error_handler():
        gdal.SetConfigOption('PL_URL', '/vsimem/v1/catalogs/')
        ds = gdal.OpenEx('PLScenes:', gdal.OF_VECTOR, open_options = ['VERSION=v1', 'API_KEY=foo'])
        gdal.SetConfigOption('PL_URL', None)
    if ds is not None:
        gdaltest.post_reason('fail')
        return 'fail'

    # Not an object
    gdal.FileFromMemBuffer('/vsimem/v1/catalogs', 'false')
    with gdaltest.error_handler():
        gdal.SetConfigOption('PL_URL', '/vsimem/v1/catalogs/')
        ds = gdal.OpenEx('PLScenes:', gdal.OF_VECTOR, open_options = ['VERSION=v1', 'API_KEY=foo'])
        gdal.SetConfigOption('PL_URL', None)
    if ds is not None:
        gdaltest.post_reason('fail')
        return 'fail'

    # Lack of "catalogs"
    gdal.FileFromMemBuffer('/vsimem/v1/catalogs', '{}')
    with gdaltest.error_handler():
        gdal.SetConfigOption('PL_URL', '/vsimem/v1/catalogs/')
        ds = gdal.OpenEx('PLScenes:', gdal.OF_VECTOR, open_options = ['VERSION=v1', 'API_KEY=foo'])
        gdal.SetConfigOption('PL_URL', None)
    if ds is not None:
        gdaltest.post_reason('fail')
        return 'fail'

    # Invalid catalog objects: none of these entries is well-formed enough
    # to become a layer, so the datasource must open with 0 layers.
    gdal.FileFromMemBuffer('/vsimem/v1/catalogs', """{"catalogs": [{}, [], null, {"id":null},
{"id":"foo"},{"id":"foo", "_links":null},{"id":"foo", "_links":[]},{"id":"foo", "_links":{}},
{"id":"foo", "_links":{"spec": []}}, {"id":"foo", "_links":{"spec": "x", "items": []}}]}""")
    gdal.SetConfigOption('PL_URL', '/vsimem/v1/catalogs/')
    ds = gdal.OpenEx('PLScenes:', gdal.OF_VECTOR, open_options = ['VERSION=v1', 'API_KEY=foo'])
    gdal.SetConfigOption('PL_URL', None)
    if ds.GetLayerCount() != 0:
        gdaltest.post_reason('fail')
        return 'fail'

    # Invalid next URL: paging stops with an error but the one valid
    # catalog is still exposed as a layer.
    gdal.FileFromMemBuffer('/vsimem/v1/catalogs', '{"_links": { "_next": "/vsimem/inexisting" }, "catalogs": [{"count": 2, "_links": { "items": "/vsimem/v1/catalogs/my_catalog/items/", "spec": "/vsimem/v1/catalogs/my_catalog/spec"}, "id": "my_catalog"}]}')
    gdal.SetConfigOption('PL_URL', '/vsimem/v1/catalogs/')
    ds = gdal.OpenEx('PLScenes:', gdal.OF_VECTOR, open_options = ['VERSION=v1', 'API_KEY=foo'])
    gdal.SetConfigOption('PL_URL', None)
    with gdaltest.error_handler():
        lyr_count = ds.GetLayerCount()
    if lyr_count != 1:
        gdaltest.post_reason('fail')
        return 'fail'

    # Missing spec document: building the layer definition errors out.
    gdal.FileFromMemBuffer('/vsimem/v1/catalogs', '{ "catalogs": [{ "_links": { "items": "/vsimem/v1/catalogs/my_catalog/items/", "spec": "/vsimem/invalid_spec"}, "id": "my_catalog"}]}')
    gdal.SetConfigOption('PL_URL', '/vsimem/v1/catalogs/')
    ds = gdal.OpenEx('PLScenes:', gdal.OF_VECTOR, open_options = ['VERSION=v1', 'API_KEY=foo'])
    gdal.SetConfigOption('PL_URL', None)
    lyr = ds.GetLayer(0)
    with gdaltest.error_handler():
        lyr.GetLayerDefn().GetFieldCount()

    # Invalid asset_categories
    gdal.FileFromMemBuffer('/vsimem/v1/catalogs', '{ "catalogs": [{ "_links": { "items": "/vsimem/v1/catalogs/my_catalog/items/", "spec": "/vsimem/invalid_spec"}, "asset_categories": false, "id": "my_catalog"}]}')
    gdal.SetConfigOption('PL_URL', '/vsimem/v1/catalogs/')
    ds = gdal.OpenEx('PLScenes:', gdal.OF_VECTOR, open_options = ['VERSION=v1', 'API_KEY=foo'])
    gdal.SetConfigOption('PL_URL', None)
    lyr = ds.GetLayer(0)
    with gdaltest.error_handler():
        lyr.GetLayerDefn().GetFieldCount()

    gdal.FileFromMemBuffer('/vsimem/v1/catalogs', '{ "catalogs": [{ "_links": { "items": "/vsimem/v1/catalogs/my_catalog/items/", "spec": "/vsimem/v1/catalogs/my_catalog/spec"}, "asset_categories": { "my_asset": {} }, "id": "my_catalog"}]}')

    # Test various errors in spec: each entry is a broken Swagger/OpenAPI
    # document; all must yield an empty (0 field) layer definition.
    for spec in [ '{}', # no path
                  '{ "paths": [] }', # bad type
                  '{ "paths": {} }', # no path for /vsimem/v1/catalogs/my_catalog/items/
                  '{ "paths": { "/catalogs/my_catalog/items/" : false } }', # wrong type
                  '{ "paths": { "/catalogs/my_catalog/items/" : {} } }', # no schema
                  """{ "paths": { "/catalogs/my_catalog/items/" : {"get": {
"responses": {
"200": {
"schema": {
"$ref": "#/definitions/ItemPage"
}
}
}
}} } }""", # wrong link
                  """{ "paths": { "/catalogs/my_catalog/items/" : {"get": {
"responses": {
"200": {
"schema": {
"$ref": false
}
}
}
}} } }""", # invalid type for $ref
                  """{ "paths": { "/catalogs/my_catalog/items/" : {"get": {
"responses": {
"200": {
"schema": {
"$ref": "#/definitions/ItemPage"
}
}
}
}} },
"definitions" :
{
"ItemPage": {}
}
}""", # Cannot find ItemPage allOf
                  """{ "paths": { "/catalogs/my_catalog/items/" : {"get": {
"responses": {
"200": {
"schema": {
"properties": false
}
}
}
}} }}""", # Cannot find ItemPage properties
                  """{ "paths": { "/catalogs/my_catalog/items/" : {"get": {
"responses": {
"200": {
"schema": {
"properties": {}
}
}
}
}} }}""", # Cannot find ItemPage properties.features.items
                  """{ "paths": { "/catalogs/my_catalog/items/" : {"get": {
"responses": {
"200": {
"schema": {
"properties": {
"features": {
"items": {
"$ref": "#/definitions/Item"
}
}
}
}
}
}
}}}}""", # Cannot find object 'Item' of '#/definitions/Item'
                  """{ "paths": { "/catalogs/my_catalog/items/" : {"get": {
"responses": {
"200": {
"schema": {
"properties": {
"features": {
"items": {
}
}
}
}
}
}
}}}}""", # Cannot find Item allOf
                  """{ "paths": { "/catalogs/my_catalog/items/" : {"get": {
"responses": {
"200": {
"schema": {
"properties": {
"features": {
"items": {
"properties": false
}
}
}
}
}
}
}}}}""", # Cannot find Item properties
                  """{ "paths": { "/catalogs/my_catalog/items/" : {"get": {
"responses": {
"200": {
"schema": {
"properties": {
"features": {
"items": {
"properties": {
}
}
}
}
}
}
}
}}}}""", # Cannot find Item properties.properties
                  """{ "paths": { "/catalogs/my_catalog/items/" : {"get": {
"responses": {
"200": {
"schema": {
"properties": {
"features": {
"items": {
"properties": {
"properties": {
"$ref": "inexisting"
}
}
}
}
}
}
}
}
}}}}""", # Cannot expand ref inexisting
                  """{ "paths": { "/catalogs/my_catalog/items/" : {"get": {
"responses": {
"200": {
"schema": {
"properties": {
"features": {
"items": {
"properties": {
"properties": {
}
}
}
}
}
}
}
},
"parameters": false
}}}}""", # Invalid parameters
                  """{ "paths": { "/catalogs/my_catalog/items/" : {"get": {
"responses": {
"200": {
"schema": {
"properties": {
"features": {
"items": {
"properties": {
"properties": {
}
}
}
}
}
}
}
},
"parameters": [
null,
false,
{
"$ref": "inexisting2"
},
{},
{"name":false},
{"name":""},
{"name":"","in":false},
{"name":"","in":"foo"}
]
}}}}""", # Invalid parameters
                  """{ "paths": { "/catalogs/my_catalog/items/" : {"get": {
"responses": {
"200": {
"schema": {
"properties": {
"features": {
"items": {
"properties": {
"properties": {
},
"_embeds": false
}
}
}
}
}
}
}
}}}}""", # invalid type for _embeds
                  """{ "paths": { "/catalogs/my_catalog/items/" : {"get": {
"responses": {
"200": {
"schema": {
"properties": {
"features": {
"items": {
"properties": {
"properties": {
},
"_embeds": {
"properties": {
"assets": {
"additionalProperties": {
"$ref": "invalid_ref"
}
}
}
}
}
}
}
}
}
}
}
}
}}}}""", # invalid ref for additionalProperties
                  """{ "paths": { "/catalogs/my_catalog/items/" : {"get": {
"responses": {
"200": {
"schema": {
"properties": {
"features": {
"items": {
"properties": {
"properties": {
},
"_embeds": {
"properties": {
"assets": {
"XXadditionalProperties": {
"properties": {
"file": {
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}}}}""", # missing additionalProperties
                  """{ "paths": { "/catalogs/my_catalog/items/" : {"get": {
"responses": {
"200": {
"schema": {
"properties": {
"features": {
"items": {
"properties": {
"properties": {
},
"_embeds": {
"properties": {
"assets": {
"additionalProperties": false
}
}
}
}
}
}
}
}
}
}
}
}}}}""", # additionalProperties of wrong type
                  """{ "paths": { "/catalogs/my_catalog/items/" : {"get": {
"responses": {
"200": {
"schema": {
"properties": {
"features": {
"items": {
"properties": {
"properties": {
},
"_embeds": {
"properties": {
"assets": {
"additionalProperties": {
"xxproperties": {}
}
}
}
}
}
}
}
}
}
}
}
}
}}}}""", # no properties in additionalProperties
                  ]:
        gdal.SetConfigOption('PL_URL', '/vsimem/v1/catalogs/')
        ds = gdal.OpenEx('PLScenes:', gdal.OF_VECTOR, open_options = ['VERSION=v1', 'API_KEY=foo'])
        gdal.SetConfigOption('PL_URL', None)
        lyr = ds.GetLayer(0)
        gdal.FileFromMemBuffer('/vsimem/v1/catalogs/my_catalog/spec', spec)
        with gdaltest.error_handler():
            field_count =lyr.GetLayerDefn().GetFieldCount()
        if field_count != 0:
            gdaltest.post_reason('fail')
            return 'fail'

    gdal.SetConfigOption('PL_URL', '/vsimem/v1/catalogs/')
    ds = gdal.OpenEx('PLScenes:', gdal.OF_VECTOR, open_options = ['VERSION=v1', 'API_KEY=foo'])
    gdal.SetConfigOption('PL_URL', None)
    lyr = ds.GetLayer(0)
    # Invalid index: out-of-range GetLayer() must not crash
    ds.GetLayer(-1)
    ds.GetLayer(1)
    with gdaltest.error_handler():
        ds.GetLayerByName('invalid_name')
    # Minimal valid spec so the layer definition can be built
    gdal.FileFromMemBuffer('/vsimem/v1/catalogs/my_catalog/spec',
"""{ "paths": { "/catalogs/my_catalog/items/" : {"get": {
"responses": {
"200": {
"schema": {
"properties": {
"features": {
"items": {
"properties": {
"properties": {
}
}
}
}
}
}
}
}
}}}}""")
    # Cannot find /vsimem/v1/catalogs/my_catalog/items/?_embeds=features.*.assets&_page_size=1000
    with gdaltest.error_handler():
        lyr.GetNextFeature()
    # Empty object
    gdal.FileFromMemBuffer('/vsimem/v1/catalogs/my_catalog/items/?_embeds=features.*.assets&_page_size=1000', '{}')
    lyr.ResetReading()
    lyr.GetNextFeature()
    # null feature
    gdal.FileFromMemBuffer('/vsimem/v1/catalogs/my_catalog/items/?_embeds=features.*.assets&_page_size=1000', '{ "features": [ null ] }')
    lyr.ResetReading()
    lyr.GetNextFeature()

    # Clean up /vsimem fixtures
    gdal.Unlink('/vsimem/v1/catalogs')
    gdal.Unlink('/vsimem/v1/catalogs/my_catalog/spec')
    gdal.Unlink('/vsimem/v1/catalogs/my_catalog/items/?_embeds=features.*.assets&_page_size=1000')
    return 'success'
###############################################################################
# Test V1 API against real server
def ogr_plscenes_v1_live():
    """Smoke-test the PLScenes V1 driver against the real Planet Labs server.

    Skipped unless the PL_API_KEY config option / environment variable is
    set.  Reads the first feature of the first layer, builds an attribute
    filter out of that feature's own 'created' timestamp plus one integer,
    one real and one string field, and checks the filtered read still
    returns a feature.  Returns 'success', 'fail' or 'skip'.
    """
    if gdaltest.plscenes_drv is None:
        return 'skip'
    api_key = gdal.GetConfigOption('PL_API_KEY')
    if api_key is None:
        print('Skipping test as PL_API_KEY not defined')
        return 'skip'

    # Small page size so the live run also exercises the paging logic.
    gdal.SetConfigOption('PLSCENES_PAGE_SIZE', '10')
    ds = ogr.Open('PLScenes:version=v1')
    gdal.SetConfigOption('PLSCENES_PAGE_SIZE', None)
    if ds is None:
        gdaltest.post_reason('fail')
        return 'fail'
    lyr = ds.GetLayer(0)
    if lyr is None:
        gdaltest.post_reason('fail')
        return 'fail'
    f = lyr.GetNextFeature()
    if f is None:
        gdaltest.post_reason('fail')
        return 'fail'

    # 'created' must exist, be a DateTime field and be set on the feature.
    lyr_defn = lyr.GetLayerDefn()
    created_field = lyr_defn.GetFieldIndex('created')
    if created_field < 0 or lyr_defn.GetFieldDefn(created_field).GetType() != ogr.OFTDateTime:
        gdaltest.post_reason('fail')
        return 'fail'
    if not f.IsFieldSet(created_field):
        gdaltest.post_reason('fail')
        f.DumpReadable()
        return 'fail'

    # Pick the first set integer, real and string fields (if any) to
    # build the attribute filter from.
    int_field = -1
    float_field = -1
    string_field = -1
    for i in range(lyr_defn.GetFieldCount()):
        typ = lyr_defn.GetFieldDefn(i).GetType()
        if int_field < 0 and typ == ogr.OFTInteger and f.IsFieldSet(i):
            int_field = i
        elif float_field < 0 and typ == ogr.OFTReal and f.IsFieldSet(i):
            float_field = i
        elif string_field < 0 and typ == ogr.OFTString and f.IsFieldSet(i):
            string_field = i

    # Renamed from 'filter'/'min'/'max' to avoid shadowing the builtins.
    attr_filter = "created='%s'" % f.GetFieldAsString(created_field)
    if int_field >= 0:
        name = lyr_defn.GetFieldDefn(int_field).GetName()
        min_val = f.GetField(int_field) - 1
        max_val = f.GetField(int_field) + 1
        attr_filter += ' AND %s >= %d AND %s <= %d' % (name, min_val, name, max_val)
    if float_field >= 0:
        name = lyr_defn.GetFieldDefn(float_field).GetName()
        min_val = f.GetField(float_field) - 0.01
        max_val = f.GetField(float_field) + 0.01
        attr_filter += ' AND %s BETWEEN %f AND %f' % (name, min_val, max_val)
    if string_field >= 0:
        name = lyr_defn.GetFieldDefn(string_field).GetName()
        value = f.GetField(string_field)
        attr_filter += " AND %s = '%s'" % (name, value)

    # The filter matches the feature we just read, so at least one
    # feature must come back.
    lyr.SetAttributeFilter(attr_filter)
    f = lyr.GetNextFeature()
    if f is None:
        gdaltest.post_reason('fail')
        return 'fail'
    return 'success'
# Registry of test functions executed by the gdaltest framework, in order.
gdaltest_list = [
    ogr_plscenes_1,
    ogr_plscenes_2,
    ogr_plscenes_3,
    ogr_plscenes_4,
    ogr_plscenes_v1_catalog_no_paging,
    ogr_plscenes_v1_catalog_paging,
    ogr_plscenes_v1_nominal,
    ogr_plscenes_v1_errors,
    ogr_plscenes_v1_live
]

if __name__ == '__main__':
    # Standalone invocation: run the whole suite and print a summary.
    gdaltest.setup_run( 'ogr_plscenes' )
    gdaltest.run_tests( gdaltest_list )
    gdaltest.summarize()
| 37.626984
| 475
| 0.493942
| 8,196
| 85,338
| 5.004392
| 0.063324
| 0.039789
| 0.059684
| 0.072947
| 0.834723
| 0.808684
| 0.780647
| 0.742369
| 0.708187
| 0.686976
| 0
| 0.032528
| 0.352282
| 85,338
| 2,267
| 476
| 37.643582
| 0.709507
| 0.04563
| 0
| 0.702417
| 0
| 0.034743
| 0.401668
| 0.15594
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006798
| false
| 0
| 0.003021
| 0
| 0.126888
| 0.006042
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
37f8e68b890245f8255bb4088f0297d76d7a33ef
| 108
|
py
|
Python
|
class.py
|
minefarmer/PythonMega
|
1b22f6648ca7a9711853aaa909558d49416d4fd7
|
[
"Unlicense"
] | null | null | null |
class.py
|
minefarmer/PythonMega
|
1b22f6648ca7a9711853aaa909558d49416d4fd7
|
[
"Unlicense"
] | null | null | null |
class.py
|
minefarmer/PythonMega
|
1b22f6648ca7a9711853aaa909558d49416d4fd7
|
[
"Unlicense"
] | null | null | null |
import cv2

# Bug fix: the original read "img-cv2.imread(...)" three times -- a
# subtraction applied to an undefined name 'img' (NameError at runtime)
# instead of an assignment, duplicated twice more by accident.
# Flag -1 == cv2.IMREAD_UNCHANGED: keep the alpha channel if present.
img = cv2.imread("galaxy.jpg", -1)
| 18
| 31
| 0.694444
| 20
| 108
| 3.75
| 0.35
| 0.24
| 0.48
| 0.72
| 0.88
| 0.88
| 0.88
| 0.88
| 0.88
| 0.88
| 0
| 0.068627
| 0.055556
| 108
| 5
| 32
| 21.6
| 0.666667
| 0
| 0
| 0.75
| 0
| 0
| 0.277778
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.25
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 13
|
5359811c52c9c872dcdee66e725db59080b5e858
| 18,397
|
py
|
Python
|
tests/components/panasonic_viera/test_config_flow.py
|
ccatterina/core
|
36789cfc310f270bf343676eb94d123e5d0dfa83
|
[
"Apache-2.0"
] | 6
|
2016-11-25T06:36:27.000Z
|
2021-11-16T11:20:23.000Z
|
tests/components/panasonic_viera/test_config_flow.py
|
SicAriuSx83/core
|
162c39258e68ae42fe4e1560ae91ed54f5662409
|
[
"Apache-2.0"
] | 45
|
2020-07-23T07:13:34.000Z
|
2022-03-31T06:01:55.000Z
|
tests/components/panasonic_viera/test_config_flow.py
|
SicAriuSx83/core
|
162c39258e68ae42fe4e1560ae91ed54f5662409
|
[
"Apache-2.0"
] | 2
|
2020-11-17T09:19:47.000Z
|
2020-12-16T03:56:09.000Z
|
"""Test the Panasonic Viera config flow."""
from panasonic_viera import TV_TYPE_ENCRYPTED, TV_TYPE_NONENCRYPTED, SOAPError
import pytest
from homeassistant import config_entries
from homeassistant.components.panasonic_viera.const import (
CONF_APP_ID,
CONF_ENCRYPTION_KEY,
CONF_ON_ACTION,
DEFAULT_NAME,
DEFAULT_PORT,
DOMAIN,
ERROR_INVALID_PIN_CODE,
ERROR_NOT_CONNECTED,
REASON_NOT_CONNECTED,
REASON_UNKNOWN,
)
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_PIN, CONF_PORT
from tests.async_mock import Mock, patch
from tests.common import MockConfigEntry
@pytest.fixture(name="panasonic_viera_setup", autouse=True)
def panasonic_viera_setup_fixture():
    """Mock panasonic_viera setup."""
    # autouse=True: every test in this module runs with component setup
    # patched out, so config-flow tests never trigger real platform setup.
    with patch(
        "homeassistant.components.panasonic_viera.async_setup", return_value=True
    ), patch(
        "homeassistant.components.panasonic_viera.async_setup_entry",
        return_value=True,
    ):
        yield
def get_mock_remote(
    host="1.2.3.4",
    authorize_error=None,
    encrypted=False,
    app_id=None,
    encryption_key=None,
):
    """Build a Mock standing in for a panasonic_viera RemoteControl."""
    remote = Mock()
    if encrypted:
        remote.type = TV_TYPE_ENCRYPTED
    else:
        remote.type = TV_TYPE_NONENCRYPTED
    remote.app_id = app_id
    remote.enc_key = encryption_key

    def _request_pin_code(name=None):
        # Asking the TV to display a PIN is a no-op on the mock.
        return

    def _authorize_pin_code(pincode):
        # "1234" is the accepted PIN; any other value raises the
        # configured error, or silently succeeds if none was configured.
        if pincode != "1234" and authorize_error is not None:
            raise authorize_error

    remote.request_pin_code = _request_pin_code
    remote.authorize_pin_code = _authorize_pin_code
    return remote
async def test_flow_non_encrypted(hass):
    """Test flow without encryption."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    assert result["type"] == "form"
    assert result["step_id"] == "user"

    mock_remote = get_mock_remote(encrypted=False)

    # A non-encrypted TV needs no pairing: the user step completes the flow.
    with patch(
        "homeassistant.components.panasonic_viera.config_flow.RemoteControl",
        return_value=mock_remote,
    ):
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_HOST: "1.2.3.4", CONF_NAME: DEFAULT_NAME},
        )

    assert result["type"] == "create_entry"
    assert result["title"] == DEFAULT_NAME
    assert result["data"] == {
        CONF_HOST: "1.2.3.4",
        CONF_NAME: DEFAULT_NAME,
        CONF_PORT: DEFAULT_PORT,
        CONF_ON_ACTION: None,
    }
async def test_flow_not_connected_error(hass):
    """Test flow with connection error."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    assert result["type"] == "form"
    assert result["step_id"] == "user"

    # TimeoutError while connecting re-shows the user form with an error.
    with patch(
        "homeassistant.components.panasonic_viera.config_flow.RemoteControl",
        side_effect=TimeoutError,
    ):
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_HOST: "1.2.3.4", CONF_NAME: DEFAULT_NAME},
        )

    assert result["type"] == "form"
    assert result["step_id"] == "user"
    assert result["errors"] == {"base": ERROR_NOT_CONNECTED}
async def test_flow_unknown_abort(hass):
    """Test flow with unknown error abortion."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    assert result["type"] == "form"
    assert result["step_id"] == "user"

    # An unexpected exception aborts the flow with the "unknown" reason.
    with patch(
        "homeassistant.components.panasonic_viera.config_flow.RemoteControl",
        side_effect=Exception,
    ):
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_HOST: "1.2.3.4", CONF_NAME: DEFAULT_NAME},
        )

    assert result["type"] == "abort"
    assert result["reason"] == REASON_UNKNOWN
async def test_flow_encrypted_valid_pin_code(hass):
    """Test flow with encryption and valid PIN code."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    assert result["type"] == "form"
    assert result["step_id"] == "user"

    mock_remote = get_mock_remote(
        encrypted=True,
        app_id="test-app-id",
        encryption_key="test-encryption-key",
    )

    # An encrypted TV routes through the "pairing" step; "1234" is the
    # PIN the mock remote accepts, so the flow finishes with an entry
    # carrying the app id and encryption key from the remote.
    with patch(
        "homeassistant.components.panasonic_viera.config_flow.RemoteControl",
        return_value=mock_remote,
    ):
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_HOST: "1.2.3.4", CONF_NAME: DEFAULT_NAME},
        )

        assert result["type"] == "form"
        assert result["step_id"] == "pairing"

        result = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_PIN: "1234"},
        )

        assert result["type"] == "create_entry"
        assert result["title"] == DEFAULT_NAME
        assert result["data"] == {
            CONF_HOST: "1.2.3.4",
            CONF_NAME: DEFAULT_NAME,
            CONF_PORT: DEFAULT_PORT,
            CONF_ON_ACTION: None,
            CONF_APP_ID: "test-app-id",
            CONF_ENCRYPTION_KEY: "test-encryption-key",
        }
async def test_flow_encrypted_invalid_pin_code_error(hass):
    """Test flow with encryption and invalid PIN code error during pairing step."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    assert result["type"] == "form"
    assert result["step_id"] == "user"

    # SOAPError on authorize_pin_code models a wrong PIN.
    mock_remote = get_mock_remote(encrypted=True, authorize_error=SOAPError)

    with patch(
        "homeassistant.components.panasonic_viera.config_flow.RemoteControl",
        return_value=mock_remote,
    ):
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_HOST: "1.2.3.4", CONF_NAME: DEFAULT_NAME},
        )

        assert result["type"] == "form"
        assert result["step_id"] == "pairing"

    # RemoteControl is patched again for the PIN submission: a wrong PIN
    # keeps the user on the pairing form with an error message.
    with patch(
        "homeassistant.components.panasonic_viera.config_flow.RemoteControl",
        return_value=mock_remote,
    ):
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_PIN: "0000"},
        )

        assert result["type"] == "form"
        assert result["step_id"] == "pairing"
        assert result["errors"] == {"base": ERROR_INVALID_PIN_CODE}
async def test_flow_encrypted_not_connected_abort(hass):
    """Test flow with encryption and PIN code connection error abortion during pairing step."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    assert result["type"] == "form"
    assert result["step_id"] == "user"

    # TimeoutError during PIN authorization aborts as "not connected".
    mock_remote = get_mock_remote(encrypted=True, authorize_error=TimeoutError)

    with patch(
        "homeassistant.components.panasonic_viera.config_flow.RemoteControl",
        return_value=mock_remote,
    ):
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_HOST: "1.2.3.4", CONF_NAME: DEFAULT_NAME},
        )

        assert result["type"] == "form"
        assert result["step_id"] == "pairing"

        result = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_PIN: "0000"},
        )

        assert result["type"] == "abort"
        assert result["reason"] == REASON_NOT_CONNECTED
async def test_flow_encrypted_unknown_abort(hass):
    """Test flow with encryption and PIN code unknown error abortion during pairing step."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    assert result["type"] == "form"
    assert result["step_id"] == "user"

    # A generic exception during PIN authorization aborts as "unknown".
    mock_remote = get_mock_remote(encrypted=True, authorize_error=Exception)

    with patch(
        "homeassistant.components.panasonic_viera.config_flow.RemoteControl",
        return_value=mock_remote,
    ):
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_HOST: "1.2.3.4", CONF_NAME: DEFAULT_NAME},
        )

        assert result["type"] == "form"
        assert result["step_id"] == "pairing"

        result = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_PIN: "0000"},
        )

        assert result["type"] == "abort"
        assert result["reason"] == REASON_UNKNOWN
async def test_flow_non_encrypted_already_configured_abort(hass):
    """Test flow without encryption and existing config entry abortion."""
    # Pre-register an entry with the same unique_id (the host) so the new
    # flow is rejected as a duplicate.
    MockConfigEntry(
        domain=DOMAIN,
        unique_id="1.2.3.4",
        data={CONF_HOST: "1.2.3.4", CONF_NAME: DEFAULT_NAME, CONF_PORT: DEFAULT_PORT},
    ).add_to_hass(hass)

    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": config_entries.SOURCE_USER},
        data={CONF_HOST: "1.2.3.4", CONF_NAME: DEFAULT_NAME},
    )

    assert result["type"] == "abort"
    assert result["reason"] == "already_configured"
async def test_flow_encrypted_already_configured_abort(hass):
    """Test flow with encryption and existing config entry abortion."""
    # Pre-register an encrypted entry with the same unique_id (the host)
    # so the new flow is rejected as a duplicate.
    MockConfigEntry(
        domain=DOMAIN,
        unique_id="1.2.3.4",
        data={
            CONF_HOST: "1.2.3.4",
            CONF_NAME: DEFAULT_NAME,
            CONF_PORT: DEFAULT_PORT,
            CONF_APP_ID: "test-app-id",
            CONF_ENCRYPTION_KEY: "test-encryption-key",
        },
    ).add_to_hass(hass)

    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": config_entries.SOURCE_USER},
        data={CONF_HOST: "1.2.3.4", CONF_NAME: DEFAULT_NAME},
    )

    assert result["type"] == "abort"
    assert result["reason"] == "already_configured"
async def test_imported_flow_non_encrypted(hass):
    """An import flow for a non-encrypted TV creates an entry directly."""
    remote = get_mock_remote(encrypted=False)

    with patch(
        "homeassistant.components.panasonic_viera.config_flow.RemoteControl",
        return_value=remote,
    ):
        step = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data={
                CONF_HOST: "1.2.3.4",
                CONF_NAME: DEFAULT_NAME,
                CONF_PORT: DEFAULT_PORT,
                CONF_ON_ACTION: "test-on-action",
            },
        )
        assert step["type"] == "create_entry"
        assert step["title"] == DEFAULT_NAME
        assert step["data"] == {
            CONF_HOST: "1.2.3.4",
            CONF_NAME: DEFAULT_NAME,
            CONF_PORT: DEFAULT_PORT,
            CONF_ON_ACTION: "test-on-action",
        }
async def test_imported_flow_encrypted_valid_pin_code(hass):
    """An import flow for an encrypted TV pairs and stores the credentials."""
    remote = get_mock_remote(
        encrypted=True,
        app_id="test-app-id",
        encryption_key="test-encryption-key",
    )

    with patch(
        "homeassistant.components.panasonic_viera.config_flow.RemoteControl",
        return_value=remote,
    ):
        step = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data={
                CONF_HOST: "1.2.3.4",
                CONF_NAME: DEFAULT_NAME,
                CONF_PORT: DEFAULT_PORT,
                CONF_ON_ACTION: "test-on-action",
            },
        )
        assert step["type"] == "form"
        assert step["step_id"] == "pairing"

        step = await hass.config_entries.flow.async_configure(
            step["flow_id"], {CONF_PIN: "1234"}
        )
        assert step["type"] == "create_entry"
        assert step["title"] == DEFAULT_NAME
        assert step["data"] == {
            CONF_HOST: "1.2.3.4",
            CONF_NAME: DEFAULT_NAME,
            CONF_PORT: DEFAULT_PORT,
            CONF_ON_ACTION: "test-on-action",
            CONF_APP_ID: "test-app-id",
            CONF_ENCRYPTION_KEY: "test-encryption-key",
        }
async def test_imported_flow_encrypted_invalid_pin_code_error(hass):
    """A SOAP error while pairing re-shows the pairing form with an error."""
    remote = get_mock_remote(encrypted=True, authorize_error=SOAPError)

    with patch(
        "homeassistant.components.panasonic_viera.config_flow.RemoteControl",
        return_value=remote,
    ):
        step = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data={
                CONF_HOST: "1.2.3.4",
                CONF_NAME: DEFAULT_NAME,
                CONF_PORT: DEFAULT_PORT,
                CONF_ON_ACTION: "test-on-action",
            },
        )
        assert step["type"] == "form"
        assert step["step_id"] == "pairing"

        # The pairing step re-creates the remote, so keep the patch active.
        with patch(
            "homeassistant.components.panasonic_viera.config_flow.RemoteControl",
            return_value=remote,
        ):
            step = await hass.config_entries.flow.async_configure(
                step["flow_id"], {CONF_PIN: "0000"}
            )
            assert step["type"] == "form"
            assert step["step_id"] == "pairing"
            assert step["errors"] == {"base": ERROR_INVALID_PIN_CODE}
async def test_imported_flow_encrypted_not_connected_abort(hass):
    """A connection timeout while pairing aborts with REASON_NOT_CONNECTED."""
    remote = get_mock_remote(encrypted=True, authorize_error=TimeoutError)

    with patch(
        "homeassistant.components.panasonic_viera.config_flow.RemoteControl",
        return_value=remote,
    ):
        step = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data={
                CONF_HOST: "1.2.3.4",
                CONF_NAME: DEFAULT_NAME,
                CONF_PORT: DEFAULT_PORT,
                CONF_ON_ACTION: "test-on-action",
            },
        )
        assert step["type"] == "form"
        assert step["step_id"] == "pairing"

        step = await hass.config_entries.flow.async_configure(
            step["flow_id"], {CONF_PIN: "0000"}
        )
        assert step["type"] == "abort"
        assert step["reason"] == REASON_NOT_CONNECTED
async def test_imported_flow_encrypted_unknown_abort(hass):
    """An unexpected pairing error aborts the import flow with REASON_UNKNOWN."""
    remote = get_mock_remote(encrypted=True, authorize_error=Exception)

    with patch(
        "homeassistant.components.panasonic_viera.config_flow.RemoteControl",
        return_value=remote,
    ):
        step = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data={
                CONF_HOST: "1.2.3.4",
                CONF_NAME: DEFAULT_NAME,
                CONF_PORT: DEFAULT_PORT,
                CONF_ON_ACTION: "test-on-action",
            },
        )
        assert step["type"] == "form"
        assert step["step_id"] == "pairing"

        step = await hass.config_entries.flow.async_configure(
            step["flow_id"], {CONF_PIN: "0000"}
        )
        assert step["type"] == "abort"
        assert step["reason"] == REASON_UNKNOWN
async def test_imported_flow_not_connected_error(hass):
    """A connection timeout during import falls back to the user form with an error."""
    with patch(
        "homeassistant.components.panasonic_viera.config_flow.RemoteControl",
        side_effect=TimeoutError,
    ):
        step = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data={
                CONF_HOST: "1.2.3.4",
                CONF_NAME: DEFAULT_NAME,
                CONF_PORT: DEFAULT_PORT,
                CONF_ON_ACTION: "test-on-action",
            },
        )
        assert step["type"] == "form"
        assert step["step_id"] == "user"
        assert step["errors"] == {"base": ERROR_NOT_CONNECTED}
async def test_imported_flow_unknown_abort(hass):
    """An unexpected error during import aborts with REASON_UNKNOWN."""
    with patch(
        "homeassistant.components.panasonic_viera.config_flow.RemoteControl",
        side_effect=Exception,
    ):
        step = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data={
                CONF_HOST: "1.2.3.4",
                CONF_NAME: DEFAULT_NAME,
                CONF_PORT: DEFAULT_PORT,
                CONF_ON_ACTION: "test-on-action",
            },
        )
        assert step["type"] == "abort"
        assert step["reason"] == REASON_UNKNOWN
async def test_imported_flow_non_encrypted_already_configured_abort(hass):
    """An import flow for an already-configured non-encrypted TV aborts."""
    existing = MockConfigEntry(
        domain=DOMAIN,
        unique_id="1.2.3.4",
        data={
            CONF_HOST: "1.2.3.4",
            CONF_NAME: DEFAULT_NAME,
            CONF_PORT: DEFAULT_PORT,
            CONF_ON_ACTION: "test-on-action",
        },
    )
    existing.add_to_hass(hass)

    step = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": config_entries.SOURCE_IMPORT},
        data={CONF_HOST: "1.2.3.4", CONF_NAME: DEFAULT_NAME},
    )
    assert step["type"] == "abort"
    assert step["reason"] == "already_configured"
async def test_imported_flow_encrypted_already_configured_abort(hass):
    """An import flow for an already-configured encrypted TV aborts."""
    existing = MockConfigEntry(
        domain=DOMAIN,
        unique_id="1.2.3.4",
        data={
            CONF_HOST: "1.2.3.4",
            CONF_NAME: DEFAULT_NAME,
            CONF_PORT: DEFAULT_PORT,
            CONF_ON_ACTION: "test-on-action",
            CONF_APP_ID: "test-app-id",
            CONF_ENCRYPTION_KEY: "test-encryption-key",
        },
    )
    existing.add_to_hass(hass)

    step = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": config_entries.SOURCE_IMPORT},
        data={CONF_HOST: "1.2.3.4", CONF_NAME: DEFAULT_NAME},
    )
    assert step["type"] == "abort"
    assert step["reason"] == "already_configured"
| 30.358086
| 104
| 0.636299
| 2,172
| 18,397
| 5.112339
| 0.050645
| 0.079971
| 0.044579
| 0.06241
| 0.90598
| 0.897514
| 0.876261
| 0.838437
| 0.818714
| 0.816372
| 0
| 0.011557
| 0.247432
| 18,397
| 605
| 105
| 30.408264
| 0.790466
| 0.004729
| 0
| 0.758242
| 0
| 0
| 0.16044
| 0.069505
| 0
| 0
| 0
| 0
| 0.162637
| 1
| 0.008791
| false
| 0
| 0.054945
| 0.002198
| 0.07033
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
727d8d6a0ee392b038a0dac1b617ba5dee6b6772
| 153
|
py
|
Python
|
teste.py
|
lisboa1701/projeto-ADS_GTI1
|
dae71089dc0ab04feb478ffcc125cd3091ba74e3
|
[
"Apache-2.0"
] | null | null | null |
teste.py
|
lisboa1701/projeto-ADS_GTI1
|
dae71089dc0ab04feb478ffcc125cd3091ba74e3
|
[
"Apache-2.0"
] | null | null | null |
teste.py
|
lisboa1701/projeto-ADS_GTI1
|
dae71089dc0ab04feb478ffcc125cd3091ba74e3
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from principal import soma
from principal import sub
def test_Somar():
    """soma(2, 4) should return the sum of its two arguments."""
    expected = 6
    assert soma(2, 4) == expected
def test_sub():
    """sub(9, 5) should return the difference of its two arguments."""
    expected = 4
    assert sub(9, 5) == expected
| 17
| 26
| 0.69281
| 26
| 153
| 4
| 0.576923
| 0.25
| 0.365385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04918
| 0.202614
| 153
| 9
| 27
| 17
| 0.803279
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 1
| 0.285714
| true
| 0
| 0.428571
| 0
| 0.714286
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
7287bac3adc79ac742494fce9f65b1d46cbb3ec9
| 5,338
|
py
|
Python
|
data/cifar.py
|
rickdr/Data-analysis-DNN-testing
|
a6b049ba218843c3ea9edf318f13a6f192b7a440
|
[
"MIT"
] | null | null | null |
data/cifar.py
|
rickdr/Data-analysis-DNN-testing
|
a6b049ba218843c3ea9edf318f13a6f192b7a440
|
[
"MIT"
] | null | null | null |
data/cifar.py
|
rickdr/Data-analysis-DNN-testing
|
a6b049ba218843c3ea9edf318f13a6f192b7a440
|
[
"MIT"
] | null | null | null |
import numpy as np
import torch
import torchvision
from torch.utils.data import Sampler
from torchvision import transforms
from collections import Counter
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
from utils.data import calculate_distribution
def load_10(loader=True, batch_sampler=None, sampler=None, batch_size=64, path="./project_data"):
    """Load CIFAR-10 and build train/val/test DataLoaders.

    Args:
        loader: when False, return the raw train/test datasets instead of loaders.
        batch_sampler: optional factory called as f(targets, batch_size=...) that
            returns a batch sampler; used for all three loaders when given.
        sampler: optional factory called as f(dataset, targets) that returns a
            per-sample sampler; mutually exclusive with shuffling the train set.
        batch_size: mini-batch size for the DataLoaders.
        path: root directory under which datasets are downloaded/cached.

    Returns:
        ("cifar10", train, test) when loader is False, otherwise
        ("cifar10", train_loader, val_loader, test_loader).
    """
    transform = transforms.Compose([
        transforms.ToTensor()
    ])
    train = torchvision.datasets.CIFAR10(
        root=path + '/datasets', train=True, transform=transform, download=True)
    test = torchvision.datasets.CIFAR10(
        root=path + '/datasets', train=False, transform=transform, download=True)

    if loader is False:
        return "cifar10", train, test

    # 90/10 train/validation split; size the train part as the remainder so the
    # two parts always sum to len(train) (the original int(0.9*n)+int(0.1*n)
    # could under-count for lengths not divisible by 10).
    n_val = int(len(train) * 0.1)
    train, val = torch.utils.data.random_split(train, [len(train) - n_val, n_val])

    if batch_sampler is not None:
        # NOTE(review): targets come from the underlying full dataset
        # (Subset.dataset), not the subset indices — confirm this is intended.
        train_batch_sampler = batch_sampler(train.dataset.targets, batch_size=batch_size)
        val_batch_sampler = batch_sampler(val.dataset.targets, batch_size=batch_size)
        test_batch_sampler = batch_sampler(test.targets, batch_size=batch_size)
        # BUG FIX: the original passed the `batch_sampler` factory itself to the
        # DataLoaders, discarding the three sampler instances built above.
        train_loader = torch.utils.data.DataLoader(train, batch_sampler=train_batch_sampler)
        val_loader = torch.utils.data.DataLoader(val, batch_sampler=val_batch_sampler)
        test_loader = torch.utils.data.DataLoader(test, batch_sampler=test_batch_sampler)
    else:
        train_shuffle = True
        train_sampler = None
        val_sampler = None
        test_sampler = None
        if sampler is not None:
            # A custom sampler is mutually exclusive with shuffle=True.
            train_shuffle = False
            train_sampler = sampler(train.dataset, train.dataset.targets)
            val_sampler = sampler(val.dataset, val.dataset.targets)
            test_sampler = sampler(test, test.targets)
        train_loader = torch.utils.data.DataLoader(
            train, batch_size=batch_size, shuffle=train_shuffle, sampler=train_sampler, pin_memory=True)
        val_loader = torch.utils.data.DataLoader(
            val, batch_size=batch_size, shuffle=False, sampler=val_sampler)
        test_loader = torch.utils.data.DataLoader(
            test, batch_size=batch_size, shuffle=False, sampler=test_sampler)
    return "cifar10", train_loader, val_loader, test_loader
def load_100(loader=True, batch_sampler=None, sampler=None, batch_size=64, path="./project_data"):
    """Load CIFAR-100 and build train/val/test DataLoaders.

    Args:
        loader: when False, return the raw train/test datasets instead of loaders.
        batch_sampler: optional factory called as f(targets, batch_size=...) that
            returns a batch sampler; used for all three loaders when given.
        sampler: optional factory called as f(dataset, targets) that returns a
            per-sample sampler; mutually exclusive with shuffling the train set.
        batch_size: mini-batch size for the DataLoaders.
        path: root directory under which datasets are downloaded/cached.

    Returns:
        ("cifar100", train, test) when loader is False, otherwise
        ("cifar100", train_loader, val_loader, test_loader).
    """
    transform = transforms.Compose([
        transforms.ToTensor()
    ])
    train = torchvision.datasets.CIFAR100(
        root=path + '/datasets', train=True, transform=transform, download=True)
    test = torchvision.datasets.CIFAR100(
        root=path + '/datasets', train=False, transform=transform, download=True)

    if loader is False:
        return "cifar100", train, test

    # 90/10 train/validation split; size the train part as the remainder so the
    # two parts always sum to len(train) (the original int(0.9*n)+int(0.1*n)
    # could under-count for lengths not divisible by 10).
    n_val = int(len(train) * 0.1)
    train, val = torch.utils.data.random_split(train, [len(train) - n_val, n_val])

    if batch_sampler is not None:
        # NOTE(review): targets come from the underlying full dataset
        # (Subset.dataset), not the subset indices — confirm this is intended.
        train_batch_sampler = batch_sampler(train.dataset.targets, batch_size=batch_size)
        val_batch_sampler = batch_sampler(val.dataset.targets, batch_size=batch_size)
        test_batch_sampler = batch_sampler(test.targets, batch_size=batch_size)
        # BUG FIX: the original passed the `batch_sampler` factory itself to the
        # DataLoaders, discarding the three sampler instances built above.
        train_loader = torch.utils.data.DataLoader(train, batch_sampler=train_batch_sampler)
        val_loader = torch.utils.data.DataLoader(val, batch_sampler=val_batch_sampler)
        test_loader = torch.utils.data.DataLoader(test, batch_sampler=test_batch_sampler)
    else:
        train_shuffle = True
        train_sampler = None
        val_sampler = None
        test_sampler = None
        if sampler is not None:
            # A custom sampler is mutually exclusive with shuffle=True.
            train_shuffle = False
            train_sampler = sampler(train.dataset, train.dataset.targets)
            val_sampler = sampler(val.dataset, val.dataset.targets)
            test_sampler = sampler(test, test.targets)
        train_loader = torch.utils.data.DataLoader(
            train, batch_size=batch_size, shuffle=train_shuffle, sampler=train_sampler, pin_memory=True)
        val_loader = torch.utils.data.DataLoader(
            val, batch_size=batch_size, shuffle=False, sampler=val_sampler)
        test_loader = torch.utils.data.DataLoader(
            test, batch_size=batch_size, shuffle=False, sampler=test_sampler)
    return "cifar100", train_loader, val_loader, test_loader
| 45.237288
| 123
| 0.70607
| 664
| 5,338
| 5.454819
| 0.103916
| 0.092766
| 0.07344
| 0.079514
| 0.929321
| 0.912203
| 0.895638
| 0.887355
| 0.849807
| 0.849807
| 0
| 0.010733
| 0.197078
| 5,338
| 118
| 124
| 45.237288
| 0.834344
| 0.11465
| 0
| 0.781609
| 0
| 0
| 0.019949
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022989
| false
| 0
| 0.103448
| 0
| 0.172414
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
72cff5b7ef48991ee7a3c03e1ab6ec30b8b929fa
| 71,542
|
py
|
Python
|
src/test/scenarios/synapse/output/extflatten/src/synapse/azext_synapse/generated/_params.py
|
isabella232/autorest.az
|
a237977061608b9cdd8b66cbc27dd3db8891a669
|
[
"MIT"
] | null | null | null |
src/test/scenarios/synapse/output/extflatten/src/synapse/azext_synapse/generated/_params.py
|
isabella232/autorest.az
|
a237977061608b9cdd8b66cbc27dd3db8891a669
|
[
"MIT"
] | 1
|
2021-02-24T09:10:12.000Z
|
2021-02-24T09:10:12.000Z
|
src/test/scenarios/synapse/output/extflatten/src/synapse/azext_synapse/generated/_params.py
|
isabella232/autorest.az
|
a237977061608b9cdd8b66cbc27dd3db8891a669
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
# pylint: disable=too-many-statements
from azure.cli.core.commands.parameters import (
tags_type,
get_three_state_flag,
get_enum_type,
resource_group_name_type,
get_location_type
)
from azure.cli.core.commands.validators import (
get_default_location_from_resource_group,
validate_file_or_dict
)
from azext_synapse.action import (
AddAutoScale,
AddAutoPause,
AddLibraryRequirements,
AddSku,
AddRecurringScans,
AddBaselineResults,
AddDefaultDataLakeStorage,
AddConnectivityEndpoints,
AddPrivateEndpointConnections
)
def load_arguments(self, _):
with self.argument_context('synapse big-data-pool list') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace')
with self.argument_context('synapse big-data-pool show') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('big_data_pool_name', options_list=['--name', '-n', '--big-data-pool-name'], type=str, help='Big '
'Data pool name', id_part='child_name_1')
with self.argument_context('synapse big-data-pool create') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace')
c.argument('big_data_pool_name', options_list=['--name', '-n', '--big-data-pool-name'], type=str, help='Big '
'Data pool name')
c.argument('force', arg_type=get_three_state_flag(), help='Whether to stop any running jobs in the Big Data '
'pool')
c.argument('tags', tags_type)
c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
validator=get_default_location_from_resource_group)
c.argument('provisioning_state', type=str, help='The state of the Big Data pool.')
c.argument('auto_scale', action=AddAutoScale, nargs='+', help='Auto-scaling properties')
c.argument('creation_date', help='The time when the Big Data pool was created.')
c.argument('auto_pause', action=AddAutoPause, nargs='+', help='Auto-pausing properties')
c.argument('spark_events_folder', type=str, help='The Spark events folder')
c.argument('node_count', type=int, help='The number of nodes in the Big Data pool.')
c.argument('library_requirements', action=AddLibraryRequirements, nargs='+', help='Library version '
'requirements')
c.argument('spark_version', type=str, help='The Apache Spark version.')
c.argument('default_spark_log_folder', type=str, help='The default folder where Spark logs will be written.')
c.argument('node_size', arg_type=get_enum_type(['None', 'Small', 'Medium', 'Large']), help='The level of '
'compute power that each node in the Big Data pool has.')
c.argument('node_size_family', arg_type=get_enum_type(['None', 'MemoryOptimized']), help='The kind of nodes '
'that the Big Data pool provides.')
with self.argument_context('synapse big-data-pool update') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('big_data_pool_name', options_list=['--name', '-n', '--big-data-pool-name'], type=str, help='Big '
'Data pool name', id_part='child_name_1')
c.argument('tags', tags_type)
with self.argument_context('synapse big-data-pool delete') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('big_data_pool_name', options_list=['--name', '-n', '--big-data-pool-name'], type=str, help='Big '
'Data pool name', id_part='child_name_1')
with self.argument_context('synapse big-data-pool wait') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('big_data_pool_name', options_list=['--name', '-n', '--big-data-pool-name'], type=str, help='Big '
'Data pool name', id_part='child_name_1')
with self.argument_context('synapse operation show-azure-async-header-result') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('operation_id', type=str, help='Operation ID', id_part='child_name_1')
with self.argument_context('synapse operation show-location-header-result') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('operation_id', type=str, help='Operation ID', id_part='child_name_1')
with self.argument_context('synapse ip-firewall-rule list') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace')
with self.argument_context('synapse ip-firewall-rule show') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('rule_name', type=str, help='The IP firewall rule name', id_part='child_name_1')
with self.argument_context('synapse ip-firewall-rule create') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace')
c.argument('rule_name', type=str, help='The IP firewall rule name')
c.argument('end_ip_address', type=str, help='The end IP address of the firewall rule. Must be IPv4 format. '
'Must be greater than or equal to startIpAddress')
c.argument('start_ip_address', type=str,
help='The start IP address of the firewall rule. Must be IPv4 format')
with self.argument_context('synapse ip-firewall-rule update') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('rule_name', type=str, help='The IP firewall rule name', id_part='child_name_1')
c.argument('end_ip_address', type=str, help='The end IP address of the firewall rule. Must be IPv4 format. '
'Must be greater than or equal to startIpAddress')
c.argument('start_ip_address', type=str,
help='The start IP address of the firewall rule. Must be IPv4 format')
with self.argument_context('synapse ip-firewall-rule delete') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('rule_name', type=str, help='The IP firewall rule name', id_part='child_name_1')
with self.argument_context('synapse ip-firewall-rule replace-all') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('ip_firewall_rules', type=validate_file_or_dict, help='IP firewall rule properties Expected value: '
'json-string/@json-file.')
with self.argument_context('synapse ip-firewall-rule wait') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('rule_name', type=str, help='The IP firewall rule name', id_part='child_name_1')
with self.argument_context('synapse sql-pool list') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace')
with self.argument_context('synapse sql-pool show') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('sql_pool_name', options_list=['--name', '-n', '--sql-pool-name'], type=str, help='SQL pool name',
id_part='child_name_1')
with self.argument_context('synapse sql-pool create') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace')
c.argument('sql_pool_name', options_list=['--name', '-n', '--sql-pool-name'], type=str, help='SQL pool name')
c.argument('tags', tags_type)
c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
validator=get_default_location_from_resource_group)
c.argument('sku', action=AddSku, nargs='+', help='SQL pool SKU')
c.argument('max_size_bytes', type=int, help='Maximum size in bytes')
c.argument('collation', type=str, help='Collation mode')
c.argument('source_database_id', type=str, help='Source database to create from')
c.argument('recoverable_database_id', type=str, help='Backup database to restore from')
c.argument('provisioning_state', type=str, help='Resource state')
c.argument('status', type=str, help='Resource status')
c.argument('restore_point_in_time', help='Snapshot time to restore')
c.argument('create_mode', type=str, help='What is this?')
c.argument('creation_date', help='Date the SQL pool was created')
with self.argument_context('synapse sql-pool update') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('sql_pool_name', options_list=['--name', '-n', '--sql-pool-name'], type=str, help='SQL pool name',
id_part='child_name_1')
c.argument('tags', tags_type)
c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
validator=get_default_location_from_resource_group)
c.argument('sku', action=AddSku, nargs='+', help='SQL pool SKU')
c.argument('max_size_bytes', type=int, help='Maximum size in bytes')
c.argument('collation', type=str, help='Collation mode')
c.argument('source_database_id', type=str, help='Source database to create from')
c.argument('recoverable_database_id', type=str, help='Backup database to restore from')
c.argument('provisioning_state', type=str, help='Resource state')
c.argument('status', type=str, help='Resource status')
c.argument('restore_point_in_time', help='Snapshot time to restore')
c.argument('create_mode', type=str, help='What is this?')
c.argument('creation_date', help='Date the SQL pool was created')
with self.argument_context('synapse sql-pool delete') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('sql_pool_name', options_list=['--name', '-n', '--sql-pool-name'], type=str, help='SQL pool name',
id_part='child_name_1')
with self.argument_context('synapse sql-pool pause') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('sql_pool_name', options_list=['--name', '-n', '--sql-pool-name'], type=str, help='SQL pool name',
id_part='child_name_1')
with self.argument_context('synapse sql-pool rename') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('sql_pool_name', options_list=['--name', '-n', '--sql-pool-name'], type=str, help='SQL pool name',
id_part='child_name_1')
c.argument('id_', options_list=['--id'], type=str, help='The target ID for the resource')
with self.argument_context('synapse sql-pool resume') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('sql_pool_name', options_list=['--name', '-n', '--sql-pool-name'], type=str, help='SQL pool name',
id_part='child_name_1')
with self.argument_context('synapse sql-pool wait') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('sql_pool_name', options_list=['--name', '-n', '--sql-pool-name'], type=str, help='SQL pool name',
id_part='child_name_1')
with self.argument_context('synapse sql-pool-metadata-sync-config show') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('sql_pool_name', type=str, help='SQL pool name', id_part='child_name_1')
with self.argument_context('synapse sql-pool-metadata-sync-config create') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace')
c.argument('sql_pool_name', type=str, help='SQL pool name')
c.argument('enabled', arg_type=get_three_state_flag(), help='Indicates whether the metadata sync is enabled or '
'disabled')
with self.argument_context('synapse sql-pool-operation-result show-location-header-result') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('sql_pool_name', type=str, help='SQL pool name', id_part='child_name_1')
c.argument('operation_id', type=str, help='Operation ID', id_part='child_name_2')
with self.argument_context('synapse sql-pool-geo-backup-policy show') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('sql_pool_name', type=str, help='SQL pool name', id_part='child_name_1')
with self.argument_context('synapse sql-pool-data-warehouse-user-activity show') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('sql_pool_name', type=str, help='SQL pool name', id_part='child_name_1')
with self.argument_context('synapse sql-pool-restore-point list') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace')
c.argument('sql_pool_name', type=str, help='SQL pool name')
with self.argument_context('synapse sql-pool-restore-point create') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace')
c.argument('sql_pool_name', type=str, help='SQL pool name')
c.argument('restore_point_label', type=str, help='The restore point label to apply')
with self.argument_context('synapse sql-pool-replication-link list') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace')
c.argument('sql_pool_name', type=str, help='SQL pool name')
with self.argument_context('synapse sql-pool-transparent-data-encryption show') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('sql_pool_name', type=str, help='SQL pool name', id_part='child_name_1')
with self.argument_context('synapse sql-pool-transparent-data-encryption create') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace')
c.argument('sql_pool_name', type=str, help='SQL pool name')
c.argument('status', arg_type=get_enum_type(['Enabled', 'Disabled']), help='The status of the database '
'transparent data encryption.')
with self.argument_context('synapse sql-pool-transparent-data-encryption update') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('sql_pool_name', type=str, help='SQL pool name', id_part='child_name_1')
c.argument('status', arg_type=get_enum_type(['Enabled', 'Disabled']), help='The status of the database '
'transparent data encryption.')
with self.argument_context('synapse sql-pool-blob-auditing-policy show') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('sql_pool_name', type=str, help='SQL pool name', id_part='child_name_1')
# Arguments for `synapse sql-pool-blob-auditing-policy create`.
# The very long help texts are adjacent string literals concatenated at
# compile time; they form one logical message and must not be reflowed
# in a way that changes their content.
with self.argument_context('synapse sql-pool-blob-auditing-policy create') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace')
    c.argument('sql_pool_name', type=str, help='SQL pool name')
    c.argument('state', arg_type=get_enum_type(['Enabled', 'Disabled']), help='Specifies the state of the policy. '
               'If state is Enabled, storageEndpoint or isAzureMonitorTargetEnabled are required.')
    c.argument('storage_endpoint', type=str, help='Specifies the blob storage endpoint (e.g. '
               'https://MyAccount.blob.core.windows.net). If state is Enabled, storageEndpoint is required.')
    c.argument('storage_account_access_key', type=str, help='Specifies the identifier key of the auditing storage '
               'account. If state is Enabled and storageEndpoint is specified, storageAccountAccessKey is '
               'required.')
    c.argument('retention_days', type=int, help='Specifies the number of days to keep in the audit logs in the '
               'storage account.')
    # Multi-valued (nargs='+'): each token is an action group or action name.
    c.argument('audit_actions_and_groups', nargs='+', help='Specifies the Actions-Groups and Actions to audit. '
               'The recommended set of action groups to use is the following combination - this will audit all the '
               'queries and stored procedures executed against the database, as well as successful and failed '
               'logins: BATCH_COMPLETED_GROUP, SUCCESSFUL_DATABASE_AUTHENTICATION_GROUP, '
               'FAILED_DATABASE_AUTHENTICATION_GROUP. This above combination is also the set that is configured '
               'by default when enabling auditing from the Azure portal. The supported action groups to audit are '
               '(note: choose only specific groups that cover your auditing needs. Using unnecessary groups could '
               'lead to very large quantities of audit records): APPLICATION_ROLE_CHANGE_PASSWORD_GROUP '
               'BACKUP_RESTORE_GROUP DATABASE_LOGOUT_GROUP DATABASE_OBJECT_CHANGE_GROUP '
               'DATABASE_OBJECT_OWNERSHIP_CHANGE_GROUP DATABASE_OBJECT_PERMISSION_CHANGE_GROUP '
               'DATABASE_OPERATION_GROUP DATABASE_PERMISSION_CHANGE_GROUP DATABASE_PRINCIPAL_CHANGE_GROUP '
               'DATABASE_PRINCIPAL_IMPERSONATION_GROUP DATABASE_ROLE_MEMBER_CHANGE_GROUP '
               'FAILED_DATABASE_AUTHENTICATION_GROUP SCHEMA_OBJECT_ACCESS_GROUP SCHEMA_OBJECT_CHANGE_GROUP '
               'SCHEMA_OBJECT_OWNERSHIP_CHANGE_GROUP SCHEMA_OBJECT_PERMISSION_CHANGE_GROUP '
               'SUCCESSFUL_DATABASE_AUTHENTICATION_GROUP USER_CHANGE_PASSWORD_GROUP BATCH_STARTED_GROUP '
               'BATCH_COMPLETED_GROUP These are groups that cover all sql statements and stored procedures '
               'executed against the database, and should not be used in combination with other groups as this '
               'will result in duplicate audit logs. For more information, see `Database-Level Audit Action '
               'Groups <https://docs.microsoft.com/en-us/sql/relational-databases/security/auditing/sql-server-audi'
               't-action-groups-and-actions#database-level-audit-action-groups>`_. For Database auditing policy, '
               'specific Actions can also be specified (note that Actions cannot be specified for Server auditing '
               'policy). The supported actions to audit are: SELECT UPDATE INSERT DELETE EXECUTE RECEIVE '
               'REFERENCES The general form for defining an action to be audited is: {action} ON {object} BY '
               '{principal} Note that :code:`<object>` in the above format can refer to an object like a table, '
               'view, or stored procedure, or an entire database or schema. For the latter cases, the forms '
               'DATABASE::{db_name} and SCHEMA::{schema_name} are used, respectively. For example: SELECT on '
               'dbo.myTable by public SELECT on DATABASE::myDatabase by public SELECT on SCHEMA::mySchema by '
               'public For more information, see `Database-Level Audit Actions <https://docs.microsoft.com/en-us/s'
               'ql/relational-databases/security/auditing/sql-server-audit-action-groups-and-actions#database-level'
               '-audit-actions>`_')
    # NOTE(review): no type=str here — presumably the generator treats this as
    # a uuid-typed parameter; confirm against the codegen spec before changing.
    c.argument('storage_account_subscription_id', help='Specifies the blob storage subscription Id.')
    c.argument('is_storage_secondary_key_in_use', arg_type=get_three_state_flag(), help='Specifies whether '
               'storageAccountAccessKey value is the storage\'s secondary key.')
    c.argument('is_azure_monitor_target_enabled', arg_type=get_three_state_flag(), help='Specifies whether audit '
               'events are sent to Azure Monitor. In order to send the events to Azure Monitor, specify \'state\' '
               'as \'Enabled\' and \'isAzureMonitorTargetEnabled\' as true. When using REST API to configure '
               'auditing, Diagnostic Settings with \'SQLSecurityAuditEvents\' diagnostic logs category on the '
               'database should be also created. Note that for server level audit you should use the \'master\' '
               'database as {databaseName}. Diagnostic Settings URI format: PUT https://management.azure.com/subsc'
               'riptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Sql/servers/{serverNam'
               'e}/databases/{databaseName}/providers/microsoft.insights/diagnosticSettings/{settingsName}?api-vers'
               'ion=2017-05-01-preview For more information, see `Diagnostic Settings REST API '
               '<https://go.microsoft.com/fwlink/?linkid=2033207>`_ or `Diagnostic Settings PowerShell '
               '<https://go.microsoft.com/fwlink/?linkid=2033043>`_')
# Arguments for `synapse sql-pool-blob-auditing-policy update`.
# Mirrors the create command's parameter set, with id_part markers added so
# workspace/pool can be resolved from a --ids resource ID.
with self.argument_context('synapse sql-pool-blob-auditing-policy update') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
    c.argument('sql_pool_name', type=str, help='SQL pool name', id_part='child_name_1')
    c.argument('state', arg_type=get_enum_type(['Enabled', 'Disabled']), help='Specifies the state of the policy. '
               'If state is Enabled, storageEndpoint or isAzureMonitorTargetEnabled are required.')
    c.argument('storage_endpoint', type=str, help='Specifies the blob storage endpoint (e.g. '
               'https://MyAccount.blob.core.windows.net). If state is Enabled, storageEndpoint is required.')
    c.argument('storage_account_access_key', type=str, help='Specifies the identifier key of the auditing storage '
               'account. If state is Enabled and storageEndpoint is specified, storageAccountAccessKey is '
               'required.')
    c.argument('retention_days', type=int, help='Specifies the number of days to keep in the audit logs in the '
               'storage account.')
    # Multi-valued (nargs='+'): each token is an action group or action name.
    c.argument('audit_actions_and_groups', nargs='+', help='Specifies the Actions-Groups and Actions to audit. '
               'The recommended set of action groups to use is the following combination - this will audit all the '
               'queries and stored procedures executed against the database, as well as successful and failed '
               'logins: BATCH_COMPLETED_GROUP, SUCCESSFUL_DATABASE_AUTHENTICATION_GROUP, '
               'FAILED_DATABASE_AUTHENTICATION_GROUP. This above combination is also the set that is configured '
               'by default when enabling auditing from the Azure portal. The supported action groups to audit are '
               '(note: choose only specific groups that cover your auditing needs. Using unnecessary groups could '
               'lead to very large quantities of audit records): APPLICATION_ROLE_CHANGE_PASSWORD_GROUP '
               'BACKUP_RESTORE_GROUP DATABASE_LOGOUT_GROUP DATABASE_OBJECT_CHANGE_GROUP '
               'DATABASE_OBJECT_OWNERSHIP_CHANGE_GROUP DATABASE_OBJECT_PERMISSION_CHANGE_GROUP '
               'DATABASE_OPERATION_GROUP DATABASE_PERMISSION_CHANGE_GROUP DATABASE_PRINCIPAL_CHANGE_GROUP '
               'DATABASE_PRINCIPAL_IMPERSONATION_GROUP DATABASE_ROLE_MEMBER_CHANGE_GROUP '
               'FAILED_DATABASE_AUTHENTICATION_GROUP SCHEMA_OBJECT_ACCESS_GROUP SCHEMA_OBJECT_CHANGE_GROUP '
               'SCHEMA_OBJECT_OWNERSHIP_CHANGE_GROUP SCHEMA_OBJECT_PERMISSION_CHANGE_GROUP '
               'SUCCESSFUL_DATABASE_AUTHENTICATION_GROUP USER_CHANGE_PASSWORD_GROUP BATCH_STARTED_GROUP '
               'BATCH_COMPLETED_GROUP These are groups that cover all sql statements and stored procedures '
               'executed against the database, and should not be used in combination with other groups as this '
               'will result in duplicate audit logs. For more information, see `Database-Level Audit Action '
               'Groups <https://docs.microsoft.com/en-us/sql/relational-databases/security/auditing/sql-server-audi'
               't-action-groups-and-actions#database-level-audit-action-groups>`_. For Database auditing policy, '
               'specific Actions can also be specified (note that Actions cannot be specified for Server auditing '
               'policy). The supported actions to audit are: SELECT UPDATE INSERT DELETE EXECUTE RECEIVE '
               'REFERENCES The general form for defining an action to be audited is: {action} ON {object} BY '
               '{principal} Note that :code:`<object>` in the above format can refer to an object like a table, '
               'view, or stored procedure, or an entire database or schema. For the latter cases, the forms '
               'DATABASE::{db_name} and SCHEMA::{schema_name} are used, respectively. For example: SELECT on '
               'dbo.myTable by public SELECT on DATABASE::myDatabase by public SELECT on SCHEMA::mySchema by '
               'public For more information, see `Database-Level Audit Actions <https://docs.microsoft.com/en-us/s'
               'ql/relational-databases/security/auditing/sql-server-audit-action-groups-and-actions#database-level'
               '-audit-actions>`_')
    # NOTE(review): no type=str here — presumably the generator treats this as
    # a uuid-typed parameter; confirm against the codegen spec before changing.
    c.argument('storage_account_subscription_id', help='Specifies the blob storage subscription Id.')
    c.argument('is_storage_secondary_key_in_use', arg_type=get_three_state_flag(), help='Specifies whether '
               'storageAccountAccessKey value is the storage\'s secondary key.')
    c.argument('is_azure_monitor_target_enabled', arg_type=get_three_state_flag(), help='Specifies whether audit '
               'events are sent to Azure Monitor. In order to send the events to Azure Monitor, specify \'state\' '
               'as \'Enabled\' and \'isAzureMonitorTargetEnabled\' as true. When using REST API to configure '
               'auditing, Diagnostic Settings with \'SQLSecurityAuditEvents\' diagnostic logs category on the '
               'database should be also created. Note that for server level audit you should use the \'master\' '
               'database as {databaseName}. Diagnostic Settings URI format: PUT https://management.azure.com/subsc'
               'riptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Sql/servers/{serverNam'
               'e}/databases/{databaseName}/providers/microsoft.insights/diagnosticSettings/{settingsName}?api-vers'
               'ion=2017-05-01-preview For more information, see `Diagnostic Settings REST API '
               '<https://go.microsoft.com/fwlink/?linkid=2033207>`_ or `Diagnostic Settings PowerShell '
               '<https://go.microsoft.com/fwlink/?linkid=2033043>`_')
# Argument registrations for `synapse sql-pool-operation list`:
# list commands take the plain scoping triple, no id_part markers.
with self.argument_context('synapse sql-pool-operation list') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', help='The name of the workspace', type=str)
    c.argument('sql_pool_name', help='SQL pool name', type=str)
# Argument registrations for `synapse sql-pool-usage list`:
# same plain scoping triple as the other list commands.
with self.argument_context('synapse sql-pool-usage list') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', help='The name of the workspace', type=str)
    c.argument('sql_pool_name', help='SQL pool name', type=str)
# --- synapse sql-pool-sensitivity-label commands -------------------------
# A sensitivity label is addressed by schema/table/column inside a SQL pool;
# update/delete/disable/enable additionally carry id_part markers
# (child_name_2..4) so the column can be resolved from a --ids value.
with self.argument_context('synapse sql-pool-sensitivity-label create') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace')
    c.argument('sql_pool_name', type=str, help='SQL pool name')
    c.argument('schema_name', type=str, help='The name of the schema.')
    c.argument('table_name', type=str, help='The name of the table.')
    c.argument('column_name', type=str, help='The name of the column.')
    c.argument('label_name', type=str, help='The label name.')
    c.argument('label_id', type=str, help='The label ID.')
    c.argument('information_type', type=str, help='The information type.')
    c.argument('information_type_id', type=str, help='The information type ID.')
with self.argument_context('synapse sql-pool-sensitivity-label update') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
    c.argument('sql_pool_name', type=str, help='SQL pool name', id_part='child_name_1')
    c.argument('schema_name', type=str, help='The name of the schema.', id_part='child_name_2')
    c.argument('table_name', type=str, help='The name of the table.', id_part='child_name_3')
    c.argument('column_name', type=str, help='The name of the column.', id_part='child_name_4')
    c.argument('label_name', type=str, help='The label name.')
    c.argument('label_id', type=str, help='The label ID.')
    c.argument('information_type', type=str, help='The information type.')
    c.argument('information_type_id', type=str, help='The information type ID.')
with self.argument_context('synapse sql-pool-sensitivity-label delete') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
    c.argument('sql_pool_name', type=str, help='SQL pool name', id_part='child_name_1')
    c.argument('schema_name', type=str, help='The name of the schema.', id_part='child_name_2')
    c.argument('table_name', type=str, help='The name of the table.', id_part='child_name_3')
    c.argument('column_name', type=str, help='The name of the column.', id_part='child_name_4')
with self.argument_context('synapse sql-pool-sensitivity-label disable-recommendation') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
    c.argument('sql_pool_name', type=str, help='SQL pool name', id_part='child_name_1')
    c.argument('schema_name', type=str, help='The name of the schema.', id_part='child_name_2')
    c.argument('table_name', type=str, help='The name of the table.', id_part='child_name_3')
    c.argument('column_name', type=str, help='The name of the column.', id_part='child_name_4')
with self.argument_context('synapse sql-pool-sensitivity-label enable-recommendation') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
    c.argument('sql_pool_name', type=str, help='SQL pool name', id_part='child_name_1')
    c.argument('schema_name', type=str, help='The name of the schema.', id_part='child_name_2')
    c.argument('table_name', type=str, help='The name of the table.', id_part='child_name_3')
    c.argument('column_name', type=str, help='The name of the column.', id_part='child_name_4')
# --- sql-pool listing commands with OData query options ------------------
# `filter_` ends in an underscore to avoid shadowing the builtin `filter`;
# options_list exposes it to the user as plain `--filter`.
with self.argument_context('synapse sql-pool-sensitivity-label list-current') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace')
    c.argument('sql_pool_name', type=str, help='SQL pool name')
    c.argument('filter_', options_list=['--filter'], type=str, help='An OData filter expression that filters '
               'elements in the collection.')
with self.argument_context('synapse sql-pool-sensitivity-label list-recommended') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace')
    c.argument('sql_pool_name', type=str, help='SQL pool name')
    c.argument('include_disabled_recommendations', arg_type=get_three_state_flag(), help='Specifies whether to '
               'include disabled recommendations or not.')
    c.argument('skip_token', type=str, help='An OData query option to indicate how many elements to skip in the '
               'collection.')
    c.argument('filter_', options_list=['--filter'], type=str, help='An OData filter expression that filters '
               'elements in the collection.')
with self.argument_context('synapse sql-pool-schema list') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace')
    c.argument('sql_pool_name', type=str, help='SQL pool name')
    c.argument('filter_', options_list=['--filter'], type=str, help='An OData filter expression that filters '
               'elements in the collection.')
with self.argument_context('synapse sql-pool-table list') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace')
    c.argument('sql_pool_name', type=str, help='SQL pool name')
    c.argument('schema_name', type=str, help='The name of the schema.')
    c.argument('filter_', options_list=['--filter'], type=str, help='An OData filter expression that filters '
               'elements in the collection.')
with self.argument_context('synapse sql-pool-table-column list') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace')
    c.argument('sql_pool_name', type=str, help='SQL pool name')
    c.argument('schema_name', type=str, help='The name of the schema.')
    c.argument('table_name', type=str, help='The name of the table.')
    c.argument('filter_', options_list=['--filter'], type=str, help='An OData filter expression that filters '
               'elements in the collection.')
# Argument registrations for `synapse sql-pool-connection-policy show`:
# the standard show-command triple with resource-ID (`--ids`) support.
with self.argument_context('synapse sql-pool-connection-policy show') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', id_part='name', type=str, help='The name of the workspace')
    c.argument('sql_pool_name', id_part='child_name_1', type=str, help='SQL pool name')
# --- synapse sql-pool-vulnerability-assessment (and -scan) commands ------
with self.argument_context('synapse sql-pool-vulnerability-assessment list') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace')
    c.argument('sql_pool_name', type=str, help='SQL pool name')
with self.argument_context('synapse sql-pool-vulnerability-assessment show') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
    c.argument('sql_pool_name', type=str, help='SQL pool name', id_part='child_name_1')
with self.argument_context('synapse sql-pool-vulnerability-assessment create') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace')
    c.argument('sql_pool_name', type=str, help='SQL pool name')
    c.argument('storage_container_path', type=str, help='A blob storage container path to hold the scan results '
               '(e.g. https://myStorage.blob.core.windows.net/VaScans/). It is required if server level '
               'vulnerability assessment policy doesn\'t set')
    c.argument('storage_container_sas_key', type=str, help='A shared access signature (SAS Key) that has write '
               'access to the blob container specified in \'storageContainerPath\' parameter. If '
               '\'storageAccountAccessKey\' isn\'t specified, StorageContainerSasKey is required.')
    c.argument('storage_account_access_key', type=str, help='Specifies the identifier key of the storage account '
               'for vulnerability assessment scan results. If \'StorageContainerSasKey\' isn\'t specified, '
               'storageAccountAccessKey is required.')
    # AddRecurringScans is a custom argparse action defined elsewhere in this module.
    c.argument('recurring_scans', action=AddRecurringScans, nargs='+', help='The recurring scans settings')
with self.argument_context('synapse sql-pool-vulnerability-assessment update') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
    c.argument('sql_pool_name', type=str, help='SQL pool name', id_part='child_name_1')
    c.argument('storage_container_path', type=str, help='A blob storage container path to hold the scan results '
               '(e.g. https://myStorage.blob.core.windows.net/VaScans/). It is required if server level '
               'vulnerability assessment policy doesn\'t set')
    c.argument('storage_container_sas_key', type=str, help='A shared access signature (SAS Key) that has write '
               'access to the blob container specified in \'storageContainerPath\' parameter. If '
               '\'storageAccountAccessKey\' isn\'t specified, StorageContainerSasKey is required.')
    c.argument('storage_account_access_key', type=str, help='Specifies the identifier key of the storage account '
               'for vulnerability assessment scan results. If \'StorageContainerSasKey\' isn\'t specified, '
               'storageAccountAccessKey is required.')
    c.argument('recurring_scans', action=AddRecurringScans, nargs='+', help='The recurring scans settings')
with self.argument_context('synapse sql-pool-vulnerability-assessment delete') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
    c.argument('sql_pool_name', type=str, help='SQL pool name', id_part='child_name_1')
with self.argument_context('synapse sql-pool-vulnerability-assessment-scan list') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace')
    c.argument('sql_pool_name', type=str, help='SQL pool name')
with self.argument_context('synapse sql-pool-vulnerability-assessment-scan export') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
    c.argument('sql_pool_name', type=str, help='SQL pool name', id_part='child_name_1')
    # NOTE(review): scan_id maps to child_name_3, skipping child_name_2 —
    # presumably the fixed vulnerability-assessment name segment; confirm.
    c.argument('scan_id', type=str, help='The vulnerability assessment scan Id of the scan to retrieve.',
               id_part='child_name_3')
with self.argument_context('synapse sql-pool-vulnerability-assessment-scan initiate-scan') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
    c.argument('sql_pool_name', type=str, help='SQL pool name', id_part='child_name_1')
    c.argument('scan_id', type=str, help='The vulnerability assessment scan Id of the scan to retrieve.',
               id_part='child_name_3')
# --- synapse sql-pool-security-alert-policy commands ---------------------
# create and update register the same policy settings; update also carries
# id_part markers for --ids support.
with self.argument_context('synapse sql-pool-security-alert-policy show') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
    c.argument('sql_pool_name', type=str, help='SQL pool name', id_part='child_name_1')
with self.argument_context('synapse sql-pool-security-alert-policy create') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace')
    c.argument('sql_pool_name', type=str, help='SQL pool name')
    c.argument('state', arg_type=get_enum_type(['New', 'Enabled', 'Disabled']), help='Specifies the state of the '
               'policy, whether it is enabled or disabled or a policy has not been applied yet on the specific Sql '
               'pool.')
    c.argument('disabled_alerts', nargs='+', help='Specifies an array of alerts that are disabled. Allowed values '
               'are: Sql_Injection, Sql_Injection_Vulnerability, Access_Anomaly, Data_Exfiltration, Unsafe_Action')
    c.argument('email_addresses', nargs='+', help='Specifies an array of e-mail addresses to which the alert is '
               'sent.')
    c.argument('email_account_admins', arg_type=get_three_state_flag(), help='Specifies that the alert is sent to '
               'the account administrators.')
    c.argument('storage_endpoint', type=str, help='Specifies the blob storage endpoint (e.g. '
               'https://MyAccount.blob.core.windows.net). This blob storage will hold all Threat Detection audit '
               'logs.')
    c.argument('storage_account_access_key', type=str, help='Specifies the identifier key of the Threat Detection '
               'audit storage account.')
    c.argument('retention_days', type=int, help='Specifies the number of days to keep in the Threat Detection '
               'audit logs.')
with self.argument_context('synapse sql-pool-security-alert-policy update') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
    c.argument('sql_pool_name', type=str, help='SQL pool name', id_part='child_name_1')
    c.argument('state', arg_type=get_enum_type(['New', 'Enabled', 'Disabled']), help='Specifies the state of the '
               'policy, whether it is enabled or disabled or a policy has not been applied yet on the specific Sql '
               'pool.')
    c.argument('disabled_alerts', nargs='+', help='Specifies an array of alerts that are disabled. Allowed values '
               'are: Sql_Injection, Sql_Injection_Vulnerability, Access_Anomaly, Data_Exfiltration, Unsafe_Action')
    c.argument('email_addresses', nargs='+', help='Specifies an array of e-mail addresses to which the alert is '
               'sent.')
    c.argument('email_account_admins', arg_type=get_three_state_flag(), help='Specifies that the alert is sent to '
               'the account administrators.')
    c.argument('storage_endpoint', type=str, help='Specifies the blob storage endpoint (e.g. '
               'https://MyAccount.blob.core.windows.net). This blob storage will hold all Threat Detection audit '
               'logs.')
    c.argument('storage_account_access_key', type=str, help='Specifies the identifier key of the Threat Detection '
               'audit storage account.')
    c.argument('retention_days', type=int, help='Specifies the number of days to keep in the Threat Detection '
               'audit logs.')
# --- synapse sql-pool-vulnerability-assessment-rule-baseline commands ----
# baseline_name is constrained to 'master'/'default' (workspace-level vs
# pool-level rule baseline, per its own help text).
with self.argument_context('synapse sql-pool-vulnerability-assessment-rule-baseline create') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace')
    c.argument('sql_pool_name', type=str, help='SQL pool name')
    c.argument('rule_id', type=str, help='The vulnerability assessment rule ID.')
    c.argument('baseline_name', arg_type=get_enum_type(['master', 'default']), help='The name of the vulnerability '
               'assessment rule baseline (default implies a baseline on a Sql pool level rule and master for '
               'workspace level rule).')
    # AddBaselineResults is a custom argparse action defined elsewhere in this module.
    c.argument('baseline_results', action=AddBaselineResults, nargs='+', help='The rule baseline result')
with self.argument_context('synapse sql-pool-vulnerability-assessment-rule-baseline update') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
    c.argument('sql_pool_name', type=str, help='SQL pool name', id_part='child_name_1')
    c.argument('rule_id', type=str, help='The vulnerability assessment rule ID.', id_part='child_name_3')
    c.argument('baseline_name', arg_type=get_enum_type(['master', 'default']), help='The name of the vulnerability '
               'assessment rule baseline (default implies a baseline on a Sql pool level rule and master for '
               'workspace level rule).', id_part='child_name_4')
    c.argument('baseline_results', action=AddBaselineResults, nargs='+', help='The rule baseline result')
with self.argument_context('synapse sql-pool-vulnerability-assessment-rule-baseline delete') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
    c.argument('sql_pool_name', type=str, help='SQL pool name', id_part='child_name_1')
    c.argument('rule_id', type=str, help='The vulnerability assessment rule ID.', id_part='child_name_3')
    c.argument('baseline_name', arg_type=get_enum_type(['master', 'default']), help='The name of the vulnerability '
               'assessment rule baseline (default implies a baseline on a Sql pool level rule and master for '
               'workspace level rule).', id_part='child_name_4')
# --- synapse workspace commands ------------------------------------------
# workspace_name is the primary resource, so it also gets --name/-n aliases.
with self.argument_context('synapse workspace list') as c:
    c.argument('resource_group_name', resource_group_name_type)
with self.argument_context('synapse workspace show') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', options_list=['--name', '-n', '--workspace-name'], type=str, help='The name of '
               'the workspace', id_part='name')
with self.argument_context('synapse workspace create') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', options_list=['--name', '-n', '--workspace-name'], type=str, help='The name of '
               'the workspace')
    c.argument('tags', tags_type)
    # Location is optional; the validator defaults it to the resource group's location.
    c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
               validator=get_default_location_from_resource_group)
    # AddDefaultDataLakeStorage / AddConnectivityEndpoints / AddPrivateEndpointConnections
    # are custom argparse actions defined elsewhere in this module.
    c.argument('default_data_lake_storage', action=AddDefaultDataLakeStorage, nargs='+', help='Workspace default '
               'data lake storage account details')
    c.argument('sql_administrator_login_password', type=str, help='SQL administrator login password')
    c.argument('managed_resource_group_name', type=str, help='Workspace managed resource group. The resource group '
               'name uniquely identifies the resource group within the user subscriptionId. The resource group '
               'name must be no longer than 90 characters long, and must be alphanumeric characters '
               '(Char.IsLetterOrDigit()) and \'-\', \'_\', \'(\', \')\' and\'.\'. Note that the name cannot end '
               'with \'.\'')
    c.argument('sql_administrator_login', type=str, help='Login for workspace SQL active directory administrator')
    c.argument('connectivity_endpoints', action=AddConnectivityEndpoints, nargs='+', help='Connectivity endpoints '
               'Expect value: KEY1=VALUE1 KEY2=VALUE2 ...')
    c.argument('managed_virtual_network', type=str, help='Setting this to \'default\' will ensure that all compute '
               'for this workspace is in a virtual network managed on behalf of the user.')
    c.argument('private_endpoint_connections', action=AddPrivateEndpointConnections, nargs='+', help='Private '
               'endpoint connections to the workspace')
    c.argument('virtual_network_profile_compute_subnet_id', type=str, help='Subnet ID used for computes in '
               'workspace')
    c.argument('identity_type', arg_type=get_enum_type(['None', 'SystemAssigned']), help='The type of managed '
               'identity for the workspace')
with self.argument_context('synapse workspace update') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', options_list=['--name', '-n', '--workspace-name'], type=str, help='The name of '
               'the workspace', id_part='name')
    c.argument('tags', tags_type)
    c.argument('sql_administrator_login_password', type=str, help='SQL administrator login password')
    c.argument('identity_type', arg_type=get_enum_type(['None', 'SystemAssigned']), help='The type of managed '
               'identity for the workspace')
with self.argument_context('synapse workspace delete') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', options_list=['--name', '-n', '--workspace-name'], type=str, help='The name of '
               'the workspace', id_part='name')
with self.argument_context('synapse workspace wait') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', options_list=['--name', '-n', '--workspace-name'], type=str, help='The name of '
               'the workspace', id_part='name')
# --- synapse workspace-aad-admin commands --------------------------------
# Manage the workspace's Azure Active Directory administrator; create and
# update register the same AAD identity fields.
with self.argument_context('synapse workspace-aad-admin show') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
with self.argument_context('synapse workspace-aad-admin create') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace')
    c.argument('tenant_id', type=str, help='Tenant ID of the workspace active directory administrator')
    c.argument('login', type=str, help='Login of the workspace active directory administrator')
    c.argument('administrator_type', type=str, help='Workspace active directory administrator type')
    c.argument('sid', type=str, help='Object ID of the workspace active directory administrator')
with self.argument_context('synapse workspace-aad-admin update') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
    c.argument('tenant_id', type=str, help='Tenant ID of the workspace active directory administrator')
    c.argument('login', type=str, help='Login of the workspace active directory administrator')
    c.argument('administrator_type', type=str, help='Workspace active directory administrator type')
    c.argument('sid', type=str, help='Object ID of the workspace active directory administrator')
with self.argument_context('synapse workspace-aad-admin delete') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
with self.argument_context('synapse workspace-aad-admin wait') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
# --- synapse workspace-managed-identity-sql-control-setting commands -----
with self.argument_context('synapse workspace-managed-identity-sql-control-setting show') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
with self.argument_context('synapse workspace-managed-identity-sql-control-setting create') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace')
    # Single Enabled/Disabled toggle granting SQL control to the managed identity.
    c.argument('grant_sql_control_to_managed_identity_desired_state', arg_type=get_enum_type(['Enabled',
               'Disabled']),
               help='Desired state')
with self.argument_context('synapse workspace-managed-identity-sql-control-setting update') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
    c.argument('grant_sql_control_to_managed_identity_desired_state', arg_type=get_enum_type(['Enabled',
               'Disabled']),
               help='Desired state')
# --- synapse integration-runtime commands (list/show/create) -------------
with self.argument_context('synapse integration-runtime list') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace')
with self.argument_context('synapse integration-runtime show') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
    c.argument('integration_runtime_name', options_list=['--name', '-n', '--integration-runtime-name'], type=str,
               help='Integration runtime name', id_part='child_name_1')
    # Optional ETag for conditional GET (per its help text).
    c.argument('if_none_match', type=str, help='ETag of the integration runtime entity. Should only be specified '
               'for get. If the ETag matches the existing entity tag, or if * was provided, then no content will '
               'be returned.')
with self.argument_context('synapse integration-runtime create') as c:
    c.argument('resource_group_name', resource_group_name_type)
    c.argument('workspace_name', type=str, help='The name of the workspace')
    c.argument('integration_runtime_name', options_list=['--name', '-n', '--integration-runtime-name'], type=str,
               help='Integration runtime name')
    c.argument('if_match', type=str, help='ETag of the integration runtime entity. Should only be specified for '
               'update, for which it should match existing entity or can be * for unconditional update.')
    # validate_file_or_dict accepts either inline JSON or an @file reference.
    c.argument('properties', type=validate_file_or_dict, help='Integration runtime properties. Expected value: '
               'json-string/@json-file.')
with self.argument_context('synapse integration-runtime update') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('integration_runtime_name', options_list=['--name', '-n', '--integration-runtime-name'], type=str,
help='Integration runtime name', id_part='child_name_1')
c.argument('auto_update', arg_type=get_enum_type(['On', 'Off']), help='Enables or disables the auto-update '
'feature of the self-hosted integration runtime. See https://go.microsoft.com/fwlink/?linkid=854189.'
'')
c.argument('update_delay_offset', type=str, help='The time offset (in hours) in the day, e.g., PT03H is 3 '
'hours. The integration runtime auto update will happen on that time.')
with self.argument_context('synapse integration-runtime delete') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('integration_runtime_name', options_list=['--name', '-n', '--integration-runtime-name'], type=str,
help='Integration runtime name', id_part='child_name_1')
with self.argument_context('synapse integration-runtime start') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('integration_runtime_name', options_list=['--name', '-n', '--integration-runtime-name'], type=str,
help='Integration runtime name', id_part='child_name_1')
with self.argument_context('synapse integration-runtime stop') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('integration_runtime_name', options_list=['--name', '-n', '--integration-runtime-name'], type=str,
help='Integration runtime name', id_part='child_name_1')
with self.argument_context('synapse integration-runtime upgrade') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('integration_runtime_name', options_list=['--name', '-n', '--integration-runtime-name'], type=str,
help='Integration runtime name', id_part='child_name_1')
with self.argument_context('synapse integration-runtime wait') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('integration_runtime_name', options_list=['--name', '-n', '--integration-runtime-name'], type=str,
help='Integration runtime name', id_part='child_name_1')
c.argument('if_none_match', type=str, help='ETag of the integration runtime entity. Should only be specified '
'for get. If the ETag matches the existing entity tag, or if * was provided, then no content will '
'be returned.')
with self.argument_context('synapse integration-runtime-node-ip-address get') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('integration_runtime_name', type=str, help='Integration runtime name', id_part='child_name_1')
c.argument('node_name', type=str, help='Integration runtime node name', id_part='child_name_2')
with self.argument_context('synapse integration-runtime-object-metadata get') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('integration_runtime_name', type=str, help='Integration runtime name', id_part='child_name_1')
c.argument('metadata_path', type=str, help='Metadata path.')
with self.argument_context('synapse integration-runtime-object-metadata refresh') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('integration_runtime_name', type=str, help='Integration runtime name', id_part='child_name_1')
with self.argument_context('synapse integration-runtime-node show') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('integration_runtime_name', type=str, help='Integration runtime name', id_part='child_name_1')
c.argument('node_name', type=str, help='Integration runtime node name', id_part='child_name_2')
with self.argument_context('synapse integration-runtime-node update') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('integration_runtime_name', type=str, help='Integration runtime name', id_part='child_name_1')
c.argument('node_name', type=str, help='Integration runtime node name', id_part='child_name_2')
c.argument('concurrent_jobs_limit', type=int, help='The number of concurrent jobs permitted to run on the '
'integration runtime node. Values between 1 and maxConcurrentJobs(inclusive) are allowed.')
with self.argument_context('synapse integration-runtime-node delete') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('integration_runtime_name', type=str, help='Integration runtime name', id_part='child_name_1')
c.argument('node_name', type=str, help='Integration runtime node name', id_part='child_name_2')
with self.argument_context('synapse integration-runtime-credentials sync') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('integration_runtime_name', type=str, help='Integration runtime name', id_part='child_name_1')
with self.argument_context('synapse integration-runtime-connection-info get') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('integration_runtime_name', type=str, help='Integration runtime name', id_part='child_name_1')
with self.argument_context('synapse integration-runtime-auth-key list') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace')
c.argument('integration_runtime_name', type=str, help='Integration runtime name')
with self.argument_context('synapse integration-runtime-auth-key regenerate') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('integration_runtime_name', type=str, help='Integration runtime name', id_part='child_name_1')
c.argument('key_name', arg_type=get_enum_type(['authKey1', 'authKey2']), help='The name of the authentication '
'key to regenerate.')
with self.argument_context('synapse integration-runtime-monitoring-data get') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('integration_runtime_name', type=str, help='Integration runtime name', id_part='child_name_1')
with self.argument_context('synapse integration-runtime-status get') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('integration_runtime_name', type=str, help='Integration runtime name', id_part='child_name_1')
with self.argument_context('synapse private-link-resource list') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace')
with self.argument_context('synapse private-link-resource show') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('private_link_resource_name', options_list=['--name', '-n', '--private-link-resource-name'],
type=str, help='The name of the private link resource', id_part='child_name_1')
with self.argument_context('synapse private-endpoint-connection list') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace')
with self.argument_context('synapse private-endpoint-connection show') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('private_endpoint_connection_name', options_list=['--name', '-n', '--private-endpoint-connection-nam'
'e'], type=str, help='The name of the private '
'endpoint connection.', id_part='child_name_1')
with self.argument_context('synapse private-endpoint-connection create') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace')
c.argument('private_endpoint_connection_name', options_list=['--name', '-n', '--private-endpoint-connection-nam'
'e'], type=str, help='The name of the private '
'endpoint connection.')
with self.argument_context('synapse private-endpoint-connection delete') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('private_endpoint_connection_name', options_list=['--name', '-n', '--private-endpoint-connection-nam'
'e'], type=str, help='The name of the private '
'endpoint connection.', id_part='child_name_1')
with self.argument_context('synapse private-endpoint-connection wait') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of the workspace', id_part='name')
c.argument('private_endpoint_connection_name', options_list=['--name', '-n', '--private-endpoint-connection-nam'
'e'], type=str, help='The name of the private '
'endpoint connection.', id_part='child_name_1')
with self.argument_context('synapse private-link-hub list') as c:
c.argument('resource_group_name', resource_group_name_type)
with self.argument_context('synapse private-link-hub show') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('private_link_hub_name', options_list=['--name', '-n', '--private-link-hub-name'], type=str,
help='The name of the privateLinkHub', id_part='name')
with self.argument_context('synapse private-link-hub create') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('private_link_hub_name', options_list=['--name', '-n', '--private-link-hub-name'], type=str,
help='The name of the privateLinkHub')
c.argument('tags', tags_type)
c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
validator=get_default_location_from_resource_group)
with self.argument_context('synapse private-link-hub update') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('private_link_hub_name', options_list=['--name', '-n', '--private-link-hub-name'], type=str,
help='The name of the privateLinkHub', id_part='name')
c.argument('tags', tags_type)
with self.argument_context('synapse private-link-hub delete') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('private_link_hub_name', options_list=['--name', '-n', '--private-link-hub-name'], type=str,
help='The name of the privateLinkHub', id_part='name')
| 74.756531
| 120
| 0.682145
| 9,617
| 71,542
| 4.877093
| 0.057398
| 0.091913
| 0.068716
| 0.068759
| 0.930537
| 0.92412
| 0.918385
| 0.910475
| 0.899815
| 0.894484
| 0
| 0.002613
| 0.197702
| 71,542
| 956
| 121
| 74.834728
| 0.814541
| 0.007073
| 0
| 0.755422
| 0
| 0.016867
| 0.493298
| 0.093363
| 0
| 0
| 0
| 0
| 0
| 1
| 0.001205
| false
| 0.007229
| 0.003614
| 0
| 0.004819
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
72fd370a9356bb6b012b61553a0b20a94b97bc5a
| 32
|
py
|
Python
|
packages/autocomplete-python/spec/fixtures/packages/test_pkg/__init__.py
|
tpaclatee/atom-setup-python
|
0d4038b948e69b72a0beda2620c4ce559be9dde8
|
[
"MIT"
] | 1
|
2018-12-13T07:11:00.000Z
|
2018-12-13T07:11:00.000Z
|
packages/autocomplete-python/spec/fixtures/packages/test_pkg/__init__.py
|
tpaclatee/atom-setup-python
|
0d4038b948e69b72a0beda2620c4ce559be9dde8
|
[
"MIT"
] | null | null | null |
packages/autocomplete-python/spec/fixtures/packages/test_pkg/__init__.py
|
tpaclatee/atom-setup-python
|
0d4038b948e69b72a0beda2620c4ce559be9dde8
|
[
"MIT"
] | null | null | null |
def FooBar():
return "BarFoo"
| 10.666667
| 17
| 0.65625
| 4
| 32
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 32
| 2
| 18
| 16
| 0.807692
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
f40b052513ed8f341b3b2c2f97f9b01c9f37cb2d
| 3,224
|
py
|
Python
|
src/genie/libs/parser/iosxe/tests/ShowLispPrefixList/cli/equal/golden_show_lisp_prefix_list_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | null | null | null |
src/genie/libs/parser/iosxe/tests/ShowLispPrefixList/cli/equal/golden_show_lisp_prefix_list_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | null | null | null |
src/genie/libs/parser/iosxe/tests/ShowLispPrefixList/cli/equal/golden_show_lisp_prefix_list_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | null | null | null |
expected_output = {
'lisp_id': {
0: {
'prefix_list_name': {
'site1': {
'number_of_entries': 4,
'users': [{
'itr_map_resolver': '100.100.100.100'
}, {
'itr_map_resolver': '44.44.44.44'
}, {
'etr_map_server': '100.100.100.100'
}, {
'etr_map_server': '44.44.44.44'
}, {
'itr_map_resolver': '100.100.100.100'
}, {
'itr_map_resolver': '44.44.44.44'
}, {
'etr_map_server': '100.100.100.100'
}, {
'etr_map_server': '44.44.44.44'
}, {
'itr_map_resolver': '100.100.100.100'
}, {
'itr_map_resolver': '44.44.44.44'
}, {
'etr_map_server': '100.100.100.100'
}, {
'etr_map_server': '44.44.44.44'
}, {
'route_import': ' '
}, {
'import_publication': ' '
}, {
'import': ' '
}, {
'itr_map_resolver': '100.100.100.100'
}, {
'itr_map_resolver': '44.44.44.44'
}, {
'etr_map_server': '100.100.100.100'
}, {
'etr_map_server': '100.100.100.100'
}, {
'etr_map_server': '44.44.44.44'
}, {
'etr_map_server': '44.44.44.44'
}],
'entries': {
'2001:192:168:1::/64': {
'sources': 'static',
'first_added': '22:56:16',
'last_verified_by': 'by static',
'last_verified': '22:56:16'
},
'2001:193:168:1::/64': {
'sources': 'static',
'first_added': '22:56:16',
'last_verified_by': 'by static',
'last_verified': '22:56:16'
},
'192.168.1.0/24': {
'sources': 'static',
'first_added': '22:56:16',
'last_verified_by': 'by static',
'last_verified': '22:56:16'
},
'193.168.1.0/24': {
'sources': 'static',
'first_added': '22:56:16',
'last_verified_by': 'by static',
'last_verified': '22:56:16'
}
}
}
}
}
}
}
| 39.802469
| 61
| 0.277916
| 234
| 3,224
| 3.57265
| 0.175214
| 0.19378
| 0.19378
| 0.129187
| 0.854067
| 0.854067
| 0.854067
| 0.830144
| 0.830144
| 0.830144
| 0
| 0.212423
| 0.595534
| 3,224
| 80
| 62
| 40.3
| 0.428681
| 0
| 0
| 0.675
| 0
| 0
| 0.30273
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.0375
| 0
| 0.0375
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
f42afd47da69f1776117c864eda9b8f262e52694
| 120
|
py
|
Python
|
models/__init__.py
|
erp27/odoo_crm_checklist
|
eadc93573b85ab15f510a4a1cbbecdf412085631
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
erp27/odoo_crm_checklist
|
eadc93573b85ab15f510a4a1cbbecdf412085631
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
erp27/odoo_crm_checklist
|
eadc93573b85ab15f510a4a1cbbecdf412085631
|
[
"MIT"
] | 1
|
2020-11-19T17:02:05.000Z
|
2020-11-19T17:02:05.000Z
|
# -*- coding: utf-8 -*-
from . import res_user_inherit
from . import crm_lead_checklist
from . import crm_lead_inherit
| 20
| 32
| 0.75
| 18
| 120
| 4.666667
| 0.611111
| 0.357143
| 0.309524
| 0.404762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009804
| 0.15
| 120
| 5
| 33
| 24
| 0.813725
| 0.175
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
f42d620da50dae8796c099a1bf2304a2db7de88c
| 3,096
|
py
|
Python
|
backend/users/migrations/0003_auto_20200212_2137.py
|
draihal/main-pr
|
81814c5370b592963e91ad0683caa560b0ea9579
|
[
"MIT"
] | 2
|
2021-01-28T08:23:15.000Z
|
2021-03-09T06:06:58.000Z
|
backend/users/migrations/0003_auto_20200212_2137.py
|
draihal/main-pr
|
81814c5370b592963e91ad0683caa560b0ea9579
|
[
"MIT"
] | 9
|
2020-01-02T15:31:04.000Z
|
2021-12-09T01:59:26.000Z
|
backend/users/migrations/0003_auto_20200212_2137.py
|
draihal/main-pr
|
81814c5370b592963e91ad0683caa560b0ea9579
|
[
"MIT"
] | 1
|
2021-03-09T06:11:16.000Z
|
2021-03-09T06:11:16.000Z
|
# Generated by Django 2.2.9 on 2020-02-12 18:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20200114_2308'),
]
operations = [
migrations.AlterField(
model_name='partner',
name='company',
field=models.CharField(blank=True, max_length=255, verbose_name='Название компании'),
),
migrations.AlterField(
model_name='student',
name='birth_date',
field=models.DateField(blank=True, null=True, verbose_name='Дата рождения'),
),
migrations.AlterField(
model_name='student',
name='can_full_time',
field=models.BooleanField(default=False, verbose_name='Полный день'),
),
migrations.AlterField(
model_name='student',
name='can_part_time',
field=models.BooleanField(default=False, verbose_name='Гибкий график'),
),
migrations.AlterField(
model_name='student',
name='can_relocate',
field=models.BooleanField(default=False, verbose_name='Готовность к переезду'),
),
migrations.AlterField(
model_name='student',
name='can_remote',
field=models.BooleanField(default=False, verbose_name='Удаленно'),
),
migrations.AlterField(
model_name='student',
name='city',
field=models.CharField(blank=True, max_length=127, verbose_name='Город'),
),
migrations.AlterField(
model_name='student',
name='country',
field=models.CharField(choices=[('NA', 'Не указано'), ('RU', 'Россия'), ('BY', 'Республика Беларусь'), ('KZ', 'Казахстан'), ('UA', 'Украина')], default='NA', max_length=2, verbose_name='Страна'),
),
migrations.AlterField(
model_name='student',
name='sex',
field=models.CharField(choices=[('0', 'Не указано'), ('m', 'Мужской'), ('f', 'Женский')], default='0', max_length=1, verbose_name='Пол'),
),
migrations.AlterField(
model_name='teacher',
name='birth_date',
field=models.DateField(blank=True, null=True, verbose_name='Дата рождения'),
),
migrations.AlterField(
model_name='teacher',
name='city',
field=models.CharField(blank=True, max_length=127, verbose_name='Город'),
),
migrations.AlterField(
model_name='teacher',
name='country',
field=models.CharField(choices=[('NA', 'Не указано'), ('RU', 'Россия'), ('BY', 'Республика Беларусь'), ('KZ', 'Казахстан'), ('UA', 'Украина')], default='NA', max_length=2, verbose_name='Страна'),
),
migrations.AlterField(
model_name='teacher',
name='sex',
field=models.CharField(choices=[('0', 'Не указано'), ('m', 'Мужской'), ('f', 'Женский')], default='0', max_length=1, verbose_name='Пол'),
),
]
| 39.189873
| 207
| 0.567183
| 306
| 3,096
| 5.594771
| 0.294118
| 0.151869
| 0.189836
| 0.22021
| 0.827103
| 0.827103
| 0.743575
| 0.584112
| 0.525701
| 0.525701
| 0
| 0.021486
| 0.278424
| 3,096
| 78
| 208
| 39.692308
| 0.744852
| 0.014535
| 0
| 0.75
| 1
| 0
| 0.173172
| 0.007543
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.013889
| 0
| 0.055556
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f431b9ebf9db4a98efb9f2179f42d45b4df8c3d5
| 294
|
py
|
Python
|
Shivam Loops/table.py
|
Shivams9/pythoncodecamp
|
e6cd27f4704a407ee360414a8c9236b254117a59
|
[
"MIT"
] | null | null | null |
Shivam Loops/table.py
|
Shivams9/pythoncodecamp
|
e6cd27f4704a407ee360414a8c9236b254117a59
|
[
"MIT"
] | null | null | null |
Shivam Loops/table.py
|
Shivams9/pythoncodecamp
|
e6cd27f4704a407ee360414a8c9236b254117a59
|
[
"MIT"
] | null | null | null |
#table of 2
n=10
for i in range(n):
print((i+1)*2 ,"," , end="")
#-9
# n = 10
for i in range(n):
print((i + 1) * (-9), ",", end="")
#table of -10
n = 10
for i in range(n):
print((i+1)*(.2),",", end="")
n=10
for i in range(n):
print((i+1)*(-9), ",", end="")
| 13.363636
| 42
| 0.414966
| 55
| 294
| 2.218182
| 0.236364
| 0.098361
| 0.196721
| 0.229508
| 0.852459
| 0.852459
| 0.852459
| 0.852459
| 0.852459
| 0.852459
| 0
| 0.095694
| 0.289116
| 294
| 21
| 43
| 14
| 0.488038
| 0.105442
| 0
| 0.818182
| 0
| 0
| 0.015625
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.363636
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
be4468032cb81d1f06a3689e67a3d4523e841e26
| 148
|
py
|
Python
|
atlas/foundations_events/src/test/producers/__init__.py
|
DeepLearnI/atlas
|
8aca652d7e647b4e88530b93e265b536de7055ed
|
[
"Apache-2.0"
] | 296
|
2020-03-16T19:55:00.000Z
|
2022-01-10T19:46:05.000Z
|
atlas/foundations_events/src/test/producers/__init__.py
|
DeepLearnI/atlas
|
8aca652d7e647b4e88530b93e265b536de7055ed
|
[
"Apache-2.0"
] | 57
|
2020-03-17T11:15:57.000Z
|
2021-07-10T14:42:27.000Z
|
atlas/foundations_events/src/test/producers/__init__.py
|
DeepLearnI/atlas
|
8aca652d7e647b4e88530b93e265b536de7055ed
|
[
"Apache-2.0"
] | 38
|
2020-03-17T21:06:05.000Z
|
2022-02-08T03:19:34.000Z
|
from test.producers.jobs import *
from test.producers.test_metric_logged import TestMetricLogged
from test.producers.test_tag_set import TestTagSet
| 37
| 62
| 0.871622
| 21
| 148
| 5.952381
| 0.52381
| 0.192
| 0.408
| 0.336
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081081
| 148
| 4
| 63
| 37
| 0.919118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
be61358065ae46ce871eec27445b4fc7fff7c9c1
| 2,133
|
py
|
Python
|
tests/test_tosca_node_mongo.py
|
lucarin91/tosKer
|
6cb2be133e39b3788a758b73f9f65c1f505a8e5b
|
[
"MIT"
] | 7
|
2017-10-18T00:07:26.000Z
|
2021-08-03T14:23:24.000Z
|
tests/test_tosca_node_mongo.py
|
lucarin91/tosKer
|
6cb2be133e39b3788a758b73f9f65c1f505a8e5b
|
[
"MIT"
] | 28
|
2017-06-30T11:15:16.000Z
|
2021-02-26T23:14:25.000Z
|
tests/test_tosca_node_mongo.py
|
lucarin91/tosKer
|
6cb2be133e39b3788a758b73f9f65c1f505a8e5b
|
[
"MIT"
] | 6
|
2017-10-19T08:30:09.000Z
|
2020-06-16T13:25:38.000Z
|
import unittest
from .test_tosca_base import TestToscaBase
class TestNodeMongo(TestToscaBase):
def test_csar(self):
file = 'data/examples/node-mongo-csar/node-mongo.csar'
up = self.o.read_plan_file(
'data/examples/node-mongo-csar/node-mongo.up.plan'
)
down = self.o.read_plan_file(
'data/examples/node-mongo-csar/node-mongo.down.plan'
)
self.assert_up_start(file, up)
self.assert_down(file, down)
def test_mix1(self):
file = 'data/examples/node-mongo/node-mongo-mix1.yaml'
up = self.o.read_plan_file(
'data/examples/node-mongo/node-mongo-mix1.up.plan'
)
down = self.o.read_plan_file(
'data/examples/node-mongo/node-mongo-mix1.down.plan'
)
self.assert_up_start(file, up)
self.assert_down(file, down)
def test_mix2(self):
file = 'data/examples/node-mongo/node-mongo-mix2.yaml'
up = self.o.read_plan_file(
'data/examples/node-mongo/node-mongo-mix2.up.plan'
)
down = self.o.read_plan_file(
'data/examples/node-mongo/node-mongo-mix2.down.plan'
)
self.assert_up_start(file, up)
self.assert_down(file, down)
def test_single_server(self):
file = 'data/examples/node-mongo/node-mongo-single-server.yaml'
up = self.o.read_plan_file(
'data/examples/node-mongo/node-mongo-single-server.up.plan'
)
down = self.o.read_plan_file(
'data/examples/node-mongo/node-mongo-single-server.down.plan'
)
self.assert_up_start(file, up)
self.assert_down(file, down)
def test_custom(self):
file = 'data/examples/node-mongo/node-mongo-custom.yaml'
up = self.o.read_plan_file(
'data/examples/node-mongo/node-mongo-custom.up.plan'
)
down = self.o.read_plan_file(
'data/examples/node-mongo/node-mongo-custom.down.plan'
)
self.assert_up_start(file, up)
self.assert_down(file, down)
if __name__ == '__main__':
unittest.main()
| 32.815385
| 73
| 0.616034
| 290
| 2,133
| 4.355172
| 0.113793
| 0.213777
| 0.190024
| 0.23753
| 0.885986
| 0.885986
| 0.882819
| 0.882819
| 0.748219
| 0.711797
| 0
| 0.005051
| 0.257384
| 2,133
| 64
| 74
| 33.328125
| 0.792298
| 0
| 0
| 0.363636
| 0
| 0
| 0.35443
| 0.35068
| 0
| 0
| 0
| 0
| 0.181818
| 1
| 0.090909
| false
| 0
| 0.036364
| 0
| 0.145455
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
be7ddb27c5d2d14a238080cc1cd2cc38409b8fcb
| 134
|
py
|
Python
|
core/__init__.py
|
aiobot-docs/AIO-Bot
|
a22c9dcc3b718b1c1311b13f838a9b8ead8e27d6
|
[
"Apache-2.0"
] | 2
|
2020-10-14T18:20:41.000Z
|
2020-10-14T18:20:44.000Z
|
core/__init__.py
|
aiobot-docs/AIO-Bot
|
a22c9dcc3b718b1c1311b13f838a9b8ead8e27d6
|
[
"Apache-2.0"
] | null | null | null |
core/__init__.py
|
aiobot-docs/AIO-Bot
|
a22c9dcc3b718b1c1311b13f838a9b8ead8e27d6
|
[
"Apache-2.0"
] | null | null | null |
#Everything In Core
from core import command_handler
from core import message_handler
from core import patch
from core import database
| 26.8
| 32
| 0.858209
| 21
| 134
| 5.380952
| 0.47619
| 0.283186
| 0.495575
| 0.371681
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134328
| 134
| 5
| 33
| 26.8
| 0.974138
| 0.134328
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
be931f7d4d63e00c246cdc201529aaae99dbb41d
| 1,189
|
py
|
Python
|
test/test_address.py
|
piersto/python_training_5
|
c608355be8b643656d3078945a92241ad81811f7
|
[
"Apache-2.0"
] | null | null | null |
test/test_address.py
|
piersto/python_training_5
|
c608355be8b643656d3078945a92241ad81811f7
|
[
"Apache-2.0"
] | null | null | null |
test/test_address.py
|
piersto/python_training_5
|
c608355be8b643656d3078945a92241ad81811f7
|
[
"Apache-2.0"
] | null | null | null |
def test_address_on_home_page(app):
# we will do the test for one contact so we write index 0 = [0]
contact_from_home_page = app.contact.get_contact_list()[0]
# now we will get contact info from edit page with index 0 as well
contact_from_edit_page = app.contact.get_contact_info_from_edit_page(0)
assert contact_from_home_page.address == contact_from_edit_page.address
def test_lastname_on_home_page(app):
# we will do the test for one contact so we write index 0 = [0]
contact_from_home_page = app.contact.get_contact_list()[0]
# now we will get contact info from edit page with index 0 as well
contact_from_edit_page = app.contact.get_contact_info_from_edit_page(0)
assert contact_from_home_page.lastname == contact_from_edit_page.lastname
def test_firstname_on_home_page(app):
# we will do the test for one contact so we write index 0 = [0]
contact_from_home_page = app.contact.get_contact_list()[0]
# now we will get contact info from edit page with index 0 as well
contact_from_edit_page = app.contact.get_contact_info_from_edit_page(0)
assert contact_from_home_page.firstname == contact_from_edit_page.firstname
| 41
| 79
| 0.772077
| 207
| 1,189
| 4.101449
| 0.144928
| 0.155477
| 0.169611
| 0.134276
| 0.823322
| 0.823322
| 0.823322
| 0.823322
| 0.823322
| 0.823322
| 0
| 0.015152
| 0.167368
| 1,189
| 28
| 80
| 42.464286
| 0.842424
| 0.319596
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
be95ce0f55d5b157fdb84d43bf65863de8eab536
| 2,148
|
py
|
Python
|
mera/unittest_example/load_all_prepared_tests_as_suite.py
|
MikeLaptev/sandbox_python
|
90d9b520d24602fa298abed4bb85232e12550fb2
|
[
"Apache-2.0"
] | 1
|
2016-02-25T19:01:01.000Z
|
2016-02-25T19:01:01.000Z
|
mera/unittest_example/load_all_prepared_tests_as_suite.py
|
MikeLaptev/sandbox_python
|
90d9b520d24602fa298abed4bb85232e12550fb2
|
[
"Apache-2.0"
] | null | null | null |
mera/unittest_example/load_all_prepared_tests_as_suite.py
|
MikeLaptev/sandbox_python
|
90d9b520d24602fa298abed4bb85232e12550fb2
|
[
"Apache-2.0"
] | null | null | null |
'''
Created on Jul 30, 2015
@author: Mikhail
'''
import unittest
def loadTestSuitesWithAllTestByDiscover():
# pattern should be like a bash shell pattern matching
example_of_loader = unittest.TestLoader()
list_of_modules = example_of_loader.discover(".", "generate_and_load_unittest_update_[a-z][a-z][a-z].py")
# Each module has type unittest.TestSuite
# Lets run all these modules one by one
for module in list_of_modules:
actual_result = unittest.TestResult()
print "<<< Launching of {} test cases >>>".format(module.countTestCases())
module.run(actual_result)
print "Launch statistic"
print "No errors occur " if len(actual_result.errors) == 0 else "{} error(s) occurs".format(len(actual_result.errors))
print "No skipped tests " if len(actual_result.skipped) == 0 else "{} test(s) has been skipped".format(len(actual_result.skipped))
print "Run was successful " if actual_result.wasSuccessful() else "Run contains test that did not complete successfully"
def loadTestSuitesWithSelectedTestsByDiscover():
# pattern should be like a bash shell pattern matching
example_of_loader = unittest.TestLoader()
list_of_modules = example_of_loader.discover(".", "generate_and_load_unittest_update_[a-z][a-z][a-z][a-z].py")
# Each module has type unittest.TestSuite
# Lets run all these modules one by one
for module in list_of_modules:
actual_result = unittest.TestResult()
print "<<< Launching of {} test cases >>>".format(module.countTestCases())
module.run(actual_result)
print "Launch statistic"
print "No errors occur " if len(actual_result.errors) == 0 else "{} error(s) occurs".format(len(actual_result.errors))
print "No skipped tests " if len(actual_result.skipped) == 0 else "{} test(s) has been skipped".format(len(actual_result.skipped))
print "Run was successful " if actual_result.wasSuccessful() else "Run contains test that did not complete successfully"
if __name__ == "__main__":
loadTestSuitesWithAllTestByDiscover()
loadTestSuitesWithSelectedTestsByDiscover()
| 53.7
| 138
| 0.715549
| 278
| 2,148
| 5.356115
| 0.291367
| 0.112827
| 0.080591
| 0.013432
| 0.856951
| 0.856951
| 0.856951
| 0.856951
| 0.856951
| 0.856951
| 0
| 0.005675
| 0.179702
| 2,148
| 40
| 139
| 53.7
| 0.839387
| 0.121508
| 0
| 0.692308
| 0
| 0
| 0.282205
| 0.059498
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.038462
| null | null | 0.384615
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
be98a41cb433bdd60736c038010c89c6362bcf0d
| 87
|
py
|
Python
|
easy_tweet_deleter/__init__.py
|
TheTallBlonde1/easy_tweet_deleter
|
3d901f38959a432bc2b8456103551310a54520dd
|
[
"MIT"
] | 1
|
2020-10-17T18:05:46.000Z
|
2020-10-17T18:05:46.000Z
|
easy_tweet_deleter/__init__.py
|
TheTallBlonde1/easy_tweet_deleter
|
3d901f38959a432bc2b8456103551310a54520dd
|
[
"MIT"
] | null | null | null |
easy_tweet_deleter/__init__.py
|
TheTallBlonde1/easy_tweet_deleter
|
3d901f38959a432bc2b8456103551310a54520dd
|
[
"MIT"
] | null | null | null |
from .easy_tweet_deleter import (run_easy_twitter_deleter, check_easy_twitter_deleter)
| 43.5
| 86
| 0.896552
| 13
| 87
| 5.384615
| 0.615385
| 0.314286
| 0.514286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057471
| 87
| 1
| 87
| 87
| 0.853659
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
fe26221c493d20658c3cafd2c53bc20e20b72016
| 222
|
py
|
Python
|
pyinstrument/renderers/__init__.py
|
liu35/osf
|
9fcda433e60023dccf6916eeeee9f1b66b439140
|
[
"BSD-3-Clause"
] | 1
|
2021-12-30T00:44:20.000Z
|
2021-12-30T00:44:20.000Z
|
pyinstrument/renderers/__init__.py
|
liu35/osf
|
9fcda433e60023dccf6916eeeee9f1b66b439140
|
[
"BSD-3-Clause"
] | null | null | null |
pyinstrument/renderers/__init__.py
|
liu35/osf
|
9fcda433e60023dccf6916eeeee9f1b66b439140
|
[
"BSD-3-Clause"
] | null | null | null |
from pyinstrument.renderers.base import Renderer
from pyinstrument.renderers.console import ConsoleRenderer
from pyinstrument.renderers.html import HTMLRenderer
from pyinstrument.renderers.jsonrenderer import JSONRenderer
| 44.4
| 60
| 0.891892
| 24
| 222
| 8.25
| 0.458333
| 0.323232
| 0.505051
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072072
| 222
| 4
| 61
| 55.5
| 0.961165
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
fe5f84d1e9f8f008fd10bb995352f25b10083a59
| 62,084
|
py
|
Python
|
MDV_utilities/mass_isotopomer_distributions.py
|
dmccloskey/MDV_utilities
|
4b892c88ccab244ea7885219ffcbc3a345b71171
|
[
"MIT"
] | 1
|
2017-05-09T23:17:41.000Z
|
2017-05-09T23:17:41.000Z
|
MDV_utilities/mass_isotopomer_distributions.py
|
dmccloskey/MDV_utilities
|
4b892c88ccab244ea7885219ffcbc3a345b71171
|
[
"MIT"
] | null | null | null |
MDV_utilities/mass_isotopomer_distributions.py
|
dmccloskey/MDV_utilities
|
4b892c88ccab244ea7885219ffcbc3a345b71171
|
[
"MIT"
] | null | null | null |
from molmass.molmass import Formula
from scipy.stats import mode
import numpy
import re
class mass_isotopomer_distributions():
def __init__(self):
return;
def build_precursorSpectrumFromMRMs(self,peakSpectrum_I,blankSpectrum_I):
'''extract maximum intensity peak'''
# Input:
# peakSpectrum_I = {fragment:{(precursor_mass,product_mass):intensity}}
# peakSpectrum_I = {fragment:{(precursor_mass,product_mass):intensity}}
# Output:
# peakSpectrum_theoretical = {fragment:{mass:intensity}}
# peakSpectrum_measured = {fragment:{mass:[measuredMass,intensity]}}
# peakSpectrum_corrected = {fragment:{mass:[measuredMass,intensity]}}
# peakSpectrum_normalized = {fragment:{mass:[measuredMass,intensity]}}
fragments_I = list(peakSpectrum_I.keys());
# round all precursor/product masses in input for comparison:
peakSpectrum_copy_I = {};
for frag,spec in peakSpectrum_I.items():
peakSpectrum_tmp = {};
for masses,intensity in spec.items():
peakSpectrum_tmp[(numpy.around(masses[0]),numpy.around(masses[1]))] = intensity;
peakSpectrum_copy_I[frag] = peakSpectrum_tmp;
blankSpectrum_copy_I = {};
for frag,spec in blankSpectrum_I.items():
blankSpectrum_tmp = {};
for masses,intensity in spec.items():
blankSpectrum_tmp[(numpy.around(masses[0]),numpy.around(masses[1]))] = intensity;
blankSpectrum_copy_I[frag] = blankSpectrum_tmp;
peakSpectrum_theoretical = self.report_fragmentSpectrum_normMax(fragments_I,True);
# determine masses from fragments
masses = [];
peakSpectrum_measured = {};
peakSpectrum_normalized = {};
peakSpectrum_corrected = {};
for frag,spec in peakSpectrum_theoretical.items():
peakSpectrum_measured[frag] = None;
peakSpectrum_corrected[frag] = None;
peakSpectrum_normalized[frag] = None;
if not spec: continue; #check if a carbon is even contained in the fragment
masses = list(spec.keys());
masses.sort(); # sort mass in massList
masses_rounded = numpy.around(masses); # round masses to nearest digit for comparison
# 1. copy data from peakSpectrum_I to peakSpectrum_measured based on theoretical fragments
# 2. generate corrected spectrum
intensityList = [];
if frag in peakSpectrum_I:
precursor_masses = [k[0] for k in peakSpectrum_copy_I[frag].keys()];
measured_spec = {};
corrected_spec = {};
for i,mass in enumerate(masses_rounded): #iterate through theoretical precursor masses
measured = 0.0;
corrected = 0.0;
if mass in precursor_masses:
product_masses = [k[1] for k in peakSpectrum_copy_I[frag].keys() if k[0]==mass];
for product in product_masses: #iterate through measured product masses
if frag in blankSpectrum_copy_I:
blank_precursor_masses = [k[0] for k in blankSpectrum_copy_I[frag].keys()];
if mass in blank_precursor_masses:
blank_product_masses = [k[1] for k in blankSpectrum_copy_I[frag].keys() if k[0]==mass];
if product in blank_product_masses:
if blankSpectrum_copy_I[frag][(mass,product)]<0.5*peakSpectrum_copy_I[frag][(mass,product)]:
corrected += peakSpectrum_copy_I[frag][(mass,product)]-blankSpectrum_copy_I[frag][(mass,product)];
measured += peakSpectrum_copy_I[frag][(mass,product)]
else:
corrected += 0.0;
measured += peakSpectrum_copy_I[frag][(mass,product)]
else:
corrected += peakSpectrum_copy_I[frag][(mass,product)];
measured += peakSpectrum_copy_I[frag][(mass,product)]
else:
corrected += peakSpectrum_copy_I[frag][(mass,product)];
measured += peakSpectrum_copy_I[frag][(mass,product)]
else:
corrected += peakSpectrum_copy_I[frag][(mass,product)];
measured += peakSpectrum_copy_I[frag][(mass,product)];
measured_spec[masses[i]] = measured;
corrected_spec[masses[i]] = corrected;
intensityList.append(corrected);
peakSpectrum_measured[frag] = measured_spec;
peakSpectrum_corrected[frag] = corrected_spec;
# normalize each spectrum:
#NOTE: normalization by max to allow for later conversion to normalization by sum
normalized = {};
intensityListMax = max(intensityList);
for k,v in peakSpectrum_corrected[frag].items():
if intensityListMax != 0: normalized[k] = v/intensityListMax;
else: normalized[k] = None;
peakSpectrum_normalized[frag] = normalized;
return peakSpectrum_measured, peakSpectrum_corrected, peakSpectrum_normalized;
def build_productSpectrumFromMRMs(self,peakSpectrum_I,blankSpectrum_I):
'''extract maximum intensity peak'''
# Input:
# peakSpectrum_I = {fragment:{(product_mass,product_mass):intensity}}
# peakSpectrum_I = {fragment:{(product_mass,product_mass):intensity}}
# Output:
# peakSpectrum_theoretical = {fragment:{mass:intensity}}
# peakSpectrum_measured = {fragment:{mass:intensity}}
# peakSpectrum_corrected = {fragment:{mass:intensity}}
# peakSpectrum_normalized = {fragment:{mass:intensity}}
fragments_I = list(peakSpectrum_I.keys());
# round all precursor/product masses in input for comparison:
peakSpectrum_copy_I = {};
for frag,spec in peakSpectrum_I.items():
peakSpectrum_tmp = {};
for masses,intensity in spec.items():
peakSpectrum_tmp[(numpy.around(masses[0]),numpy.around(masses[1]))] = intensity;
peakSpectrum_copy_I[frag] = peakSpectrum_tmp;
blankSpectrum_copy_I = {};
for frag,spec in blankSpectrum_I.items():
blankSpectrum_tmp = {};
for masses,intensity in spec.items():
blankSpectrum_tmp[(numpy.around(masses[0]),numpy.around(masses[1]))] = intensity;
blankSpectrum_copy_I[frag] = blankSpectrum_tmp;
peakSpectrum_theoretical = self.report_fragmentSpectrum_normMax(fragments_I,True);
# determine masses from fragments
masses = [];
peakSpectrum_measured = {};
peakSpectrum_normalized = {};
peakSpectrum_corrected = {};
for frag,spec in peakSpectrum_theoretical.items():
peakSpectrum_measured[frag] = None;
peakSpectrum_corrected[frag] = None;
peakSpectrum_normalized[frag] = None;
if not spec: continue; #check if a carbon is even contained in the fragment
masses = list(spec.keys());
masses.sort(); # sort mass in massList
masses_rounded = numpy.around(masses); # round masses to nearest digit for comparison
# 1. copy data from peakSpectrum_I to peakSpectrum_measured based on theoretical fragments
# 2. generate corrected spectrum
intensityList = [];
if frag in peakSpectrum_I:
product_masses = [k[1] for k in peakSpectrum_copy_I[frag].keys()];
measured_spec = {};
corrected_spec = {};
for i,mass in enumerate(masses_rounded): #iterate through theoretical product masses
measured = 0.0;
corrected = 0.0;
if mass in product_masses:
precursor_masses = [k[0] for k in peakSpectrum_copy_I[frag].keys() if k[1]==mass];
for precursor in precursor_masses: #iterate through measured precursor masses
if frag in blankSpectrum_copy_I:
blank_product_masses = [k[1] for k in blankSpectrum_copy_I[frag].keys()];
if mass in blank_product_masses:
blank_precursor_masses = [k[0] for k in blankSpectrum_copy_I[frag].keys() if k[1]==mass];
if precursor in blank_precursor_masses:
if blankSpectrum_copy_I[frag][(precursor,mass)]<0.5*peakSpectrum_copy_I[frag][(precursor,mass)]:
corrected += peakSpectrum_copy_I[frag][(precursor,mass)]-blankSpectrum_copy_I[frag][(precursor,mass)];
measured += peakSpectrum_copy_I[frag][(precursor,mass)]
else:
corrected += 0.0;
measured += peakSpectrum_copy_I[frag][(precursor,mass)]
else:
corrected += peakSpectrum_copy_I[frag][(precursor,mass)];
measured += peakSpectrum_copy_I[frag][(precursor,mass)]
else:
corrected += peakSpectrum_copy_I[frag][(precursor,mass)];
measured += peakSpectrum_copy_I[frag][(precursor,mass)]
else:
corrected += peakSpectrum_copy_I[frag][(precursor,mass)];
measured += peakSpectrum_copy_I[frag][(precursor,mass)];
measured_spec[masses[i]] = measured;
corrected_spec[masses[i]] = corrected;
intensityList.append(corrected);
peakSpectrum_measured[frag] = measured_spec;
peakSpectrum_corrected[frag] = corrected_spec;
# normalize each spectrum:
#NOTE: normalization by max to allow for later conversion to normalization by sum
normalized = {};
intensityListMax = max(intensityList);
for k,v in peakSpectrum_corrected[frag].items():
if intensityListMax != 0: normalized[k] = v/intensityListMax;
else: normalized[k] = None;
peakSpectrum_normalized[frag] = normalized;
return peakSpectrum_measured, peakSpectrum_corrected, peakSpectrum_normalized;
def compare_peakSpectrum_normMax(self,peakSpectrum_normalized_list_I,return_theoretical = False):
# Input:
# peakSpectrum_normalized_list_I = [{fragment:{mass:intensity}}]
# Output:
# peakSpectrum_stats_O = {fragment:{mass:{'n':integer,
# 'mean':fraction,
# 'stdDev':fraction,
# 'absDev':fraction}}
fragments_all = [];
for row in peakSpectrum_normalized_list_I:
fragments_all.extend(list(row.keys()));
fragments_I = list(set(fragments_all));
#fragments_I = peakSpectrum_normalized_list_I[0].keys();
peakSpectrum_theoretical = self.report_fragmentSpectrum_normMax(fragments_I,True);
peakSpectrum_stats_O = {};
for frag in fragments_I:
peakSpectrum_stats_O[frag] = {'n':None,
'mean':None,
'stdDev':None,
'absDev':None};
if not peakSpectrum_theoretical[frag]: continue; # no carbons in fragment
intensityList = [];
masses = [];
stats = {};
for peakSpectrum in peakSpectrum_normalized_list_I:
intensityDict = {};
peakSpectrumMasses = list(peakSpectrum_theoretical[frag].keys());
for mass in peakSpectrumMasses:
if frag in peakSpectrum and mass in peakSpectrum[frag] and peakSpectrum[frag][mass] and peakSpectrum[frag][mass] > 0.0:
intensityDict[mass] = peakSpectrum[frag][mass];
else:
intensityDict[mass] = 0.0;
if not mass in masses: masses.append(mass);
intensityList.append(intensityDict);
## uncomment to only compare measured masses
#intensityDict = {};
#peakSpectrumMasses = peakSpectrum[frag].keys();
#for mass in peakSpectrumMasses:
# if peakSpectrum[frag][mass] > 0.0:
# intensityDict[mass] = peakSpectrum[frag][mass];
# if not mass in masses: masses.append(mass);
#intensityList.append(intensityDict);
for mass in masses:
stats[mass] = None;
data = [];
for intensity in intensityList:
if intensity[mass]>0.0:data.append(intensity[mass]);
if data:
intensity_array = numpy.array(data);
if peakSpectrum_theoretical[frag][mass]:abs_dev = abs(intensity_array.mean() - peakSpectrum_theoretical[frag][mass]);
else: abs_dev = None;
stats[mass] = {'n':len(intensity_array),
'mean':intensity_array.mean(),
'stdDev':intensity_array.std(),
'absDev':abs_dev};
else:
stats[mass] = {'n':0.0,
'mean':0.0,
'stdDev':0.0,
'absDev':None};
if stats: peakSpectrum_stats_O[frag] = stats;
if return_theoretical:
return peakSpectrum_stats_O,peakSpectrum_theoretical;
else:
return peakSpectrum_stats_O;
def compare_peakSpectrum_normSum(self,peakSpectrum_normalized_list_I,return_theoretical = False):
# Input:
# peakSpectrum_normalized_list_I = [{fragment:{mass:[measuredMass,intensity]}}]
# Output:
# peakSpectrum_stats_O = {fragment:{mass:{'n':integer,
# 'mean':fraction,
# 'stdDev':fraction,
# 'absDev':fraction}}
fragments_all = [];
for row in peakSpectrum_normalized_list_I:
fragments_all.extend(list(row.keys()));
fragments_I = list(set(fragments_all));
#fragments_I = peakSpectrum_normalized_list_I[0].keys();
peakSpectrum_theoretical = self.report_fragmentSpectrum_normSum(fragments_I,True);
peakSpectrum_stats_O = {};
for frag in fragments_I:
peakSpectrum_stats_O[frag] = {'n':None,
'mean':None,
'stdDev':None,
'absDev':None};
if not peakSpectrum_theoretical[frag]: continue; # no carbons in fragment
intensityList = [];
masses = [];
stats = {};
for peakSpectrum in peakSpectrum_normalized_list_I:
intensityDict = {};
peakSpectrumMasses = list(peakSpectrum_theoretical[frag].keys());
for mass in peakSpectrumMasses:
if frag in peakSpectrum and frag in peakSpectrum and mass in peakSpectrum[frag] and peakSpectrum[frag][mass] and peakSpectrum[frag][mass] > 0.0:
intensityDict[mass] = peakSpectrum[frag][mass];
else:
intensityDict[mass] = 0.0;
if not mass in masses: masses.append(mass);
intensityList.append(intensityDict);
## uncomment to only compare measured masses
#intensityDict = {};
#peakSpectrumMasses = peakSpectrum[frag].keys();
#for mass in peakSpectrumMasses:
# if peakSpectrum[frag][mass] > 0.0:
# intensityDict[mass] = peakSpectrum[frag][mass];
# if not mass in masses: masses.append(mass);
#intensityList.append(intensityDict);
for mass in masses:
stats[mass] = None;
data = [];
for intensity in intensityList:
if intensity[mass]>0.0:data.append(intensity[mass]);
if data:
intensity_array = numpy.array(data);
if peakSpectrum_theoretical[frag][mass]:abs_dev = abs(intensity_array.mean() - peakSpectrum_theoretical[frag][mass]);
else: abs_dev = None;
stats[mass] = {'n':len(intensity_array),
'mean':intensity_array.mean(),
'stdDev':intensity_array.std(),
'absDev':abs_dev};
else:
stats[mass] = {'n':0.0,
'mean':0.0,
'stdDev':0.0,
'absDev':None};
if stats: peakSpectrum_stats_O[frag] = stats;
if return_theoretical:
return peakSpectrum_stats_O,peakSpectrum_theoretical;
else:
return peakSpectrum_stats_O;
def report_fragmentSpectrum_normMax(self,fragments_I,round_mass=False):
'''calculate the format spectrum as a list'''
# Input: formula_str_I
# Output: spectrum_lst_O
fragmentSpectrum_tmp = {};
fragmentSpectrum_O = {};
for formula_str_I in fragments_I:
fragmentSpectrum_tmp[formula_str_I] = None;
fragmentSpectrum_O[formula_str_I] = None;
formula_str = re.sub('[+-]', '', formula_str_I);
n12C = 0
n13C = 0
if 'C' not in Formula(formula_str)._elements: continue; #check if a carbon is even contained in the formula
if 0 in Formula(formula_str)._elements['C']:
n12C += Formula(formula_str)._elements['C'][0]; #get the # of Carbons
if 13 in Formula(formula_str)._elements['C']:
n13C += Formula(formula_str)._elements['C'][13]
mnumber = Formula(formula_str).isotope.massnumber #get the nominal mass number
spectrum = Formula(formula_str).spectrum() #get the spectrum
fragmentSpectrum = {}
intensityList = [];
for c in range(-n13C, n12C + 1):
if c<0:
fragmentSpectrum[Formula(formula_str).isotope.mass-1]=0.0;
intensityList.append(0.0);
else:
if mnumber+c in spectrum:
fragmentSpectrum[spectrum[mnumber+c][0]]=spectrum[mnumber+c][1];
intensityList.append(spectrum[mnumber+c][1]);
else:
fragmentSpectrum[Formula(formula_str).isotope.mass + c]=0.0;
intensityList.append(0.0);
fragmentSpectrum_tmp[formula_str_I] = fragmentSpectrum;
# by default, the spectrum is normalized to the sum of all intensities measured
# convert sum-normalized spectrum to max-normalized spectrum
intensityListMax = max(intensityList);
fragmentSpectrum = {};
for k,v in fragmentSpectrum_tmp[formula_str_I].items():
if round_mass:
fragmentSpectrum[int(numpy.round(k))] = v/intensityListMax;
else:
fragmentSpectrum[k] = v/intensityListMax;
fragmentSpectrum_O[formula_str_I] = fragmentSpectrum;
return fragmentSpectrum_O;
def report_fragmentSpectrum_normSum(self,fragments_I,round_mass=False):
'''calculate the fragment spectrum'''
# Input: formula_str_I
# Output: spectrum_lst_O
fragmentSpectrum_O = {};
for formula_str_I in fragments_I:
fragmentSpectrum_O[formula_str_I] = None;
formula_str = re.sub('[+-]', '', formula_str_I);
n12C = 0
n13C = 0
if 'C' not in Formula(formula_str)._elements: break; #check if a carbon is even contained in the formula
if 0 in Formula(formula_str)._elements['C']:
n12C += Formula(formula_str)._elements['C'][0]; #get the # of Carbons
if 13 in Formula(formula_str)._elements['C']:
n13C += Formula(formula_str)._elements['C'][13]
mnumber = Formula(formula_str).isotope.massnumber #get the nominal mass number
spectrum = Formula(formula_str).spectrum() #get the spectrum
fragmentSpectrum = {}
for c in range(-n13C, n12C + 1):
if c<0:
exact_mass = Formula(formula_str).isotope.mass+c;
if round_mass:
fragmentSpectrum[int(numpy.round(exact_mass))]=0.0;
else:
fragmentSpectrum[exact_mass]=0.0;
else:
if mnumber+c in spectrum:
exact_mass = spectrum[mnumber+c][0];
if round_mass:
fragmentSpectrum[int(numpy.round(exact_mass))]=spectrum[mnumber+c][1];
else:
fragmentSpectrum[exact_mass]=spectrum[mnumber+c][1];
else:
exact_mass = Formula(formula_str).isotope.mass + c
if round_mass:
fragmentSpectrum[int(numpy.round(exact_mass))]=0.0;
else:
fragmentSpectrum[exact_mass]=0.0;
fragmentSpectrum_O[formula_str_I] = fragmentSpectrum;
return fragmentSpectrum_O;
def extract_peakData_normMax(self, peakData_I, fragments_I, res_I=0.3, round_mass=False):
'''extract maximum intensity peak'''
# Input: peakData_I = mass:intensity
# res_I = mass window/resolution (default = 0.3);
# Output:
# peakSpectrum_theoretical = {fragment:{mass:intensity}}
# peakSpectrum_measured = {fragment:{mass:intensity}}
# peakSpectrum_corrected = {fragment:{mass:intensity}}
# peakSpectrum_normalized = {fragment:{mass:intensity}}
'''The algorithm implement below does not track the peak width for calculation of peak area,
nor for calculate of resolution using FWHM. However, compared to peak-picking algorithm
implemented in analyst(r) and peakView(r), the intensities for most compounds match
the intensities calculated as peaks (compare 140228_MRM_EPI/..._EPI to ..._EPI_peakList
or 140228_ER_EPI/...I to ..._ER).'''
# min peak height
detectionThreshold = 2500.0
# pre-sort for efficiency
# sort masses in peakData
keys = list(peakData_I.keys());
keys.sort();
# determine baseline intensity
# based on the most occuring intensity (background threshold);
values = numpy.array(list(peakData_I.values()));
values_median = mode(values)[0];
if len(values_median) > 1:
baseline = float(max(values_median)); # min returned too much junk
else:
baseline = float(values_median);
if round_mass:
peakSpectrum_theoretical = self.report_fragmentSpectrum_normMax(fragments_I,True);
else:
peakSpectrum_theoretical = self.report_fragmentSpectrum_normMax(fragments_I);
# determine masses from fragments
masses = [];
peakSpectrum_measured_qcqa = {};
peakSpectrum_normalized_qcqa = {};
peakSpectrum_corrected_qcqa = {};
peakSpectrum_measured = {};
peakSpectrum_normalized = {};
peakSpectrum_corrected = {};
for frag,spec in peakSpectrum_theoretical.items():
peakSpectrum_measured_qcqa[frag] = None;
peakSpectrum_corrected_qcqa[frag] = None;
peakSpectrum_normalized_qcqa[frag] = None;
peakSpectrum_measured[frag] = None;
peakSpectrum_corrected[frag] = None;
peakSpectrum_normalized[frag] = None;
if not spec: continue; #check if a carbon is even contained in the fragment
masses = list(spec.keys());
masses.sort(); # sort mass in massList
keyIndex = 0;
keyMax = len(keys);
measured_qcqa = {};
measured = {};
for mass in masses: # iterate through each mass
maxPeak = 0.0;
keyMaxPeak = None;
measured_qcqa[mass] = [keyMaxPeak,maxPeak];
measured[mass] = maxPeak;
while keyIndex<keyMax:
if keys[keyIndex] >= mass - res_I and keys[keyIndex] < mass + res_I:
peak = peakData_I[keys[keyIndex]];
if peak > maxPeak:
maxPeak = peak;
keyMaxPeak = keys[keyIndex];
keyIndex += 1;
elif keys[keyIndex] < mass - res_I:
keyIndex += 1;
continue;
elif keys[keyIndex] >= mass + res_I:
measured_qcqa[mass] = [keyMaxPeak,maxPeak];
measured[mass] = maxPeak;
break;
if measured:
peakSpectrum_measured_qcqa[frag] = measured_qcqa;
peakSpectrum_measured[frag] = measured;
else: break #no peaks were found for the fragment
# correct intensity for background:
corrected_qcqa = {};
#intensityList = [];
for k,v in peakSpectrum_measured_qcqa[frag].items():
if v[1] > detectionThreshold:
if v[1] - baseline > 0.0:
corrected_qcqa[k] = [v[0],v[1] - baseline];
else:
corrected_qcqa[k] = [v[0],0.0];
else:
corrected_qcqa[k] = [v[0],0.0];
#intensityList.append(corrected_qcqa[k][1]);
peakSpectrum_corrected_qcqa[frag] = corrected_qcqa
corrected = {};
intensityList = [];
for k,v in peakSpectrum_measured[frag].items():
if v > detectionThreshold:
if v - baseline > 0.0:
corrected[k] = v - baseline;
else:
corrected[k] = 0.0;
intensityList.append(corrected[k]);
else:
corrected[k] = 0.0;
intensityList.append(corrected[k]);
peakSpectrum_corrected[frag] = corrected;
# normalize each spectrum:
normalized_qcqa = {};
intensityListMax_qcqa = max(intensityList);
for k,v in peakSpectrum_corrected_qcqa[frag].items():
if intensityListMax_qcqa != 0: normalized_qcqa[k] = [v[0],v[1]/intensityListMax_qcqa];
else: normalized_qcqa[k] = [v[0], None];
peakSpectrum_normalized_qcqa[frag] = normalized_qcqa;
normalized = {};
intensityListMax = max(intensityList);
for k,v in peakSpectrum_corrected[frag].items():
if intensityListMax != 0: normalized[k] = v/intensityListMax;
else: normalized[k] = None;
peakSpectrum_normalized[frag] = normalized;
return peakSpectrum_measured, peakSpectrum_corrected, peakSpectrum_normalized;
def extract_peakData_normSum(self, peakData_I, fragments_I, res_I=0.3,round_mass=False):
'''extract maximum intensity peak'''
# Input: peakData_I = mass:intensity
# res_I = mass window/resolution (default = 0.3);
# Output:
# peakSpectrum_theoretical = {fragment:{mass:intensity}}
# peakSpectrum_measured = {fragment:{mass:intensity}}
# peakSpectrum_corrected = {fragment:{mass:intensity}}
# peakSpectrum_normalized = {fragment:{mass:intensity}}
# min peak height
detectionThreshold = 1000.0
# pre-sort for efficiency
# sort masses in peakData
keys = list(peakData_I.keys());
keys.sort();
# determine baseline intensity
# based on the most occuring intensity (background threshold);
values = numpy.array(list(peakData_I.values()));
values_median = mode(values)[0];
if len(values_median) > 1:
baseline = float(max(values_median)); # min returned too much junk
else:
baseline = float(values_median);
if round_mass:
peakSpectrum_theoretical = self.report_fragmentSpectrum_normMax(fragments_I,True);
else:
peakSpectrum_theoretical = self.report_fragmentSpectrum_normMax(fragments_I);
# determine masses from fragments
masses = [];
peakSpectrum_measured_qcqa = {};
peakSpectrum_normalized_qcqa = {};
peakSpectrum_corrected_qcqa = {};
peakSpectrum_measured = {};
peakSpectrum_normalized = {};
peakSpectrum_corrected = {};
for frag,spec in peakSpectrum_theoretical.items():
peakSpectrum_measured_qcqa[frag] = None;
peakSpectrum_corrected_qcqa[frag] = None;
peakSpectrum_normalized_qcqa[frag] = None;
peakSpectrum_measured[frag] = None;
peakSpectrum_corrected[frag] = None;
peakSpectrum_normalized[frag] = None;
if not spec: continue; #check if a carbon is even contained in the fragment
masses = list(spec.keys());
masses.sort(); # sort mass in massList
keyIndex = 0;
keyMax = len(keys);
measured_qcqa = {};
measured = {};
for mass in masses: # iterate through each mass
maxPeak = 0.0;
keyMaxPeak = None;
measured_qcqa[mass] = [keyMaxPeak,maxPeak];
measured[mass] = maxPeak;
while keyIndex<keyMax:
if keys[keyIndex] >= mass - res_I and keys[keyIndex] < mass + res_I:
peak = peakData_I[keys[keyIndex]];
if peak > maxPeak:
maxPeak = peak;
keyMaxPeak = keys[keyIndex];
keyIndex += 1;
elif keys[keyIndex] < mass - res_I:
keyIndex += 1;
continue;
elif keys[keyIndex] >= mass + res_I:
measured_qcqa[mass] = [keyMaxPeak,maxPeak];
measured[mass] = maxPeak;
break;
if measured:
peakSpectrum_measured_qcqa[frag] = measured_qcqa;
peakSpectrum_measured[frag] = measured;
else: break #no peaks were found for the fragment
# correct intensity for background:
corrected_qcqa = {};
#intensityList = [];
for k,v in peakSpectrum_measured_qcqa[frag].items():
if v[1] > detectionThreshold:
if v[1] - baseline > 0.0:
corrected_qcqa[k] = [v[0],v[1] - baseline];
else:
corrected_qcqa[k] = [v[0],0.0];
else:
corrected_qcqa[k] = [v[0],0.0];
#intensityList.append(corrected_qcqa[k][1]);
peakSpectrum_corrected_qcqa[frag] = corrected_qcqa
corrected = {};
intensityList = [];
for k,v in peakSpectrum_measured[frag].items():
if v > detectionThreshold:
if v - baseline > 0.0:
corrected[k] = v - baseline;
else:
corrected[k] = 0.0;
intensityList.append(corrected[k]);
else:
corrected[k] = 0.0;
intensityList.append(corrected[k]);
peakSpectrum_corrected[frag] = corrected;
# normalize each spectrum:
normalized_qcqa = {};
intensityListSum_qcqa = sum(intensityList);
for k,v in peakSpectrum_corrected_qcqa[frag].items():
if intensityListSum_qcqa != 0: normalized_qcqa[k] = [v[0],v[1]/intensityListSum_qcqa];
else: normalized_qcqa[k] = [v[0], None];
peakSpectrum_normalized_qcqa[frag] = normalized_qcqa;
normalized = {};
intensityListSum = sum(intensityList);
for k,v in peakSpectrum_corrected[frag].items():
if intensityListSum != 0: normalized[k] = v/intensityListSum;
else: normalized[k] = None;
peakSpectrum_normalized[frag] = normalized;
return peakSpectrum_measured, peakSpectrum_corrected, peakSpectrum_normalized;
def extract_peakList_normMax(self, peakSpectrum_I, fragments_I, round_mass=False):
'''extract peak spectrum from peak list'''
# Input:
# peakSpectrum_I = {fragment:{(precursor_mass,product_mass):intensity}}
# fragments_I = [fragments]
# Output:
# peakSpectrum_corrected = {fragment:{mass:intensity}}
# peakSpectrum_normalized = {fragment:{mass:intensity}}
# round all precursor/product masses in input for comparison:
peakSpectrum_copy_I = {};
for frag,spec in peakSpectrum_I.items():
peakSpectrum_tmp = {};
for masses,intensity in spec.items():
peakSpectrum_tmp[numpy.around(masses)] = intensity;
peakSpectrum_copy_I[frag] = peakSpectrum_tmp;
if round_mass:
peakSpectrum_theoretical = self.report_fragmentSpectrum_normMax(fragments_I,True);
else:
peakSpectrum_theoretical = self.report_fragmentSpectrum_normMax(fragments_I);
# determine masses from fragments
masses = [];
peakSpectrum_normalized = {};
peakSpectrum_corrected = {};
for frag,spec in peakSpectrum_theoretical.items():
peakSpectrum_corrected[frag] = None;
peakSpectrum_normalized[frag] = None;
if not spec: continue; #check if a carbon is even contained in the fragment
masses = list(spec.keys());
masses.sort(); # sort mass in massList
masses_rounded = numpy.around(masses); # round masses to nearest digit for comparison
# 1. copy data from peakSpectrum_I to peakSpectrum_corrected based on theoretical fragments
intensityList = [];
if frag in peakSpectrum_I:
fragment_masses = [k for k in peakSpectrum_copy_I[frag].keys()];
corrected_spec = {};
for i,mass in enumerate(masses_rounded):
corrected = 0.0; #added on 12/30/15
if mass in fragment_masses:
corrected = peakSpectrum_copy_I[frag][mass];
if not corrected: corrected = 0.0;
corrected_spec[masses[i]] = corrected;
intensityList.append(corrected);
peakSpectrum_corrected[frag] = corrected_spec;
else:
corrected_spec = {};
for i,mass in enumerate(masses_rounded):
corrected = 0.0;
corrected_spec[masses[i]] = corrected;
intensityList.append(corrected);
peakSpectrum_corrected[frag] = corrected_spec;
# normalize each spectrum:
#NOTE: normalization by max to allow for later conversion to normalization by sum
normalized = {};
intensityListMax = max(intensityList);
for k,v in peakSpectrum_corrected[frag].items():
if v:
if intensityListMax != 0: normalized[k] = v/intensityListMax;
else: normalized[k] = None;
else: normalized[k] = None;
peakSpectrum_normalized[frag] = normalized;
return peakSpectrum_corrected, peakSpectrum_normalized;
def extract_peakList_normSum(self, peakSpectrum_I, fragments_I, round_mass=False):
'''extract peak spectrum from peak list'''
# Input:
# peakSpectrum_I = {fragment:{mass:intensity}}
# fragments_I = [fragments]
# Output:
# peakSpectrum_corrected = {fragment:{mass:intensity}}
# peakSpectrum_normalized = {fragment:{mass:intensity}}
# round all precursor/product masses in input for comparison:
peakSpectrum_copy_I = {};
for frag,spec in peakSpectrum_I.items():
peakSpectrum_tmp = {};
for masses,intensity in spec.items():
peakSpectrum_tmp[numpy.around(masses)] = intensity;
peakSpectrum_copy_I[frag] = peakSpectrum_tmp;
if round_mass:
peakSpectrum_theoretical = self.report_fragmentSpectrum_normSum(fragments_I,True);
else:
peakSpectrum_theoretical = self.report_fragmentSpectrum_normSum(fragments_I);
# determine masses from fragments
masses = [];
peakSpectrum_normalized = {};
peakSpectrum_corrected = {};
for frag,spec in peakSpectrum_theoretical.items():
peakSpectrum_corrected[frag] = None;
peakSpectrum_normalized[frag] = None;
if not spec: continue; #check if a carbon is even contained in the fragment
masses = list(spec.keys());
masses.sort(); # sort mass in massList
masses_rounded = numpy.around(masses); # round masses to nearest digit for comparison
# 1. copy data from peakSpectrum_I to peakSpectrum_corrected based on theoretical fragments
intensityList = [];
if frag in peakSpectrum_I:
fragment_masses = [k for k in peakSpectrum_copy_I[frag].keys()];
corrected_spec = {};
for i,mass in enumerate(masses_rounded):
corrected = 0.0;
if mass in fragment_masses and peakSpectrum_copy_I[frag][mass]:
corrected = peakSpectrum_copy_I[frag][mass];
corrected_spec[masses[i]] = corrected;
intensityList.append(corrected);
peakSpectrum_corrected[frag] = corrected_spec;
else:
corrected_spec = {};
for i,mass in enumerate(masses_rounded):
corrected = 0.0;
corrected_spec[masses[i]] = corrected;
intensityList.append(corrected);
peakSpectrum_corrected[frag] = corrected_spec;
# normalize each spectrum:
normalized = {};
intensityListSum = sum(intensityList);
for k,v in peakSpectrum_corrected[frag].items():
if v>0.0:
if intensityListSum != 0: normalized[k] = v/intensityListSum;
else: normalized[k] = None;
else: normalized[k] = None;
peakSpectrum_normalized[frag] = normalized;
return peakSpectrum_corrected, peakSpectrum_normalized;
def recombine_dilutionsMRMs(self,peakData_I):
'''Method to "recombine" MRMs from one dilution to the next'''
# input: peakData_I = {frag:[mass:{'intensity':intensity,
# 'dilution':dilution,
# 'used_':used_,
# 'comment_':comment_}]}
# e.g.: {frag:[100:{'dilution':'high',...}],
# [101:{'dilution':'low','comment_':'Recombine',...}],
# [101:{'dilution':'high','comment_':'Recombine',...}],
# [102:{'dilution':'low','comment_':'Recombine',...}],
# [103:{'dilution':'low',...}],...}
# NOTE: dictionary > List of dictionaries
# NOTE: input list of masses must be sorted in ascending order
# followed by 'dilutions' in descending order as shown below!
# output: peakData_O = {frag:{mass:{'intensity':intensity,
# 'dilution':dilution,
# 'used_':used_,
# 'comment_':comment_}}}
# peakData_O_false = {frag:{mass:{'intensity':intensity,
# 'dilution':dilution,
# 'used_':used_,
# 'comment_':comment_}}}
# Note: second output structure needed to update rows that are changed to false
'''Algorithm:
start:
dilution m comment used
'low' 0 '' false
'high' 0 '' true
'low' 1 'Recombine' true
'high' 1 'Recombine' true
'low' 2 'Recombine' true
'high' 2 '' false
'low' 3 '' true
'high' 3 '' false
recombine...
end:
dilution m comment used
'low' 0 '' false
'high' 0 '' true
'low' 1 'Recombine' false
'high' 1 'Recombine' true
'low' 2 'Recombine' true
'high' 2 '' false
'low' 3 '' true
'high' 3 '' false
...
done prior: set normalized intensity to diluion 'low', m 1 to 1;
recalculate the rest of the normalized intensities for the dilutions 'low', m 2,3,4,...;
calculate the percent change from dilution 'low', m 1 to dilution 'low', m 2; from dilution 'low', m 2 to dilution 'low', m 3; ...;
replace dilution 'high', m 2 with the normalized intensity for dilution 'low', m 1 - the percent change from dilution 'low', m 1 to dilution 'low', m 2;
replace dilution 'low', m 3 with the new normalized intensity for m 2 - the percent change from dilution 'low', m 2 to dilution 'low', m 3;
...;'''
peakData_O = {};
peakData_O_false = {};
#iterate through each fragment
for frag,spec in peakData_I.items():
peakData_O[frag] = None;
peakData_O_false[frag] = None;
spec_O = {};
spec_O_false = {};
if not spec: continue; #check if there is data for the fragment
# extract out dilutions
dilutions = [];
for d in spec:
values = list(d.values())[0];
dilutions.append(values['dilution']);
dilutions = list(set(dilutions));
dilutions.sort();
dilutions_dict = dict(list(zip(dilutions,['low','high'])));
#iterate through each spectrum
intensity_prev = 0.0
intensity_new = 0.0;
intensity_difference = 0.0;
recombine_cnt = 0;
for spec_dict in spec:
mass = list(spec_dict.keys())[0];
data = list(spec_dict.values())[0];
spec_O[mass] = None;
data_O = {};
if not data['intensity']:
data_O['dilution'] = None;
data_O['intensity'] = None;
data_O['comment_'] = None;
data_O['used_'] = None;
spec_O[mass] = data_O;
continue;
if data['comment_'] == 'Recombine':
if recombine_cnt == 0: # 1st recombination event
if dilutions_dict[data['dilution']] != 'low': print('bad input');
intensity_prev = data['intensity'];
data['used_'] = False;
# copy the data
data_O['dilution'] = data['dilution'];
data_O['intensity'] = data['intensity'];
data_O['comment_'] = data['comment_'];
data_O['used_'] = data['used_'];
spec_O_false[mass] = data_O;
recombine_cnt += 1;
continue
elif recombine_cnt == 1: # 2nd recombination event
if dilutions_dict[data['dilution']] != 'high': print('bad input');
intensity_new = data['intensity'];
recombine_cnt += 1;
elif recombine_cnt == 2: # 3rd recombination event
if dilutions_dict[data['dilution']] != 'low': print('bad input');
intensity_difference = data['intensity']/intensity_prev;
intensity_prev = data['intensity'];
intensity_new = intensity_new*intensity_difference;
data['intensity'] = intensity_new;
recombine_cnt += 1;
elif recombine_cnt >= 3:
if dilutions_dict[data['dilution']] != 'low': print('bad input');
intensity_difference = data['intensity']/intensity_prev;
intensity_prev = data['intensity'];
intensity_new = intensity_new*intensity_difference;
data['intensity'] = intensity_new;
recombine_cnt += 1;
# copy data
data_O['dilution'] = data['dilution'];
data_O['intensity'] = data['intensity'];
data_O['comment_'] = data['comment_'];
data_O['used_'] = data['used_'];
spec_O[mass] = data_O;
# copy spectrum
peakData_O[frag] = spec_O
peakData_O_false[frag] = spec_O_false
#copy out the intensities without the comments
peakData_intensities_O = {};
for frag,spec in peakData_O.items():
spec_tmp = {};
for mass,v in spec.items():
spec_tmp[mass]=v['intensity'];
peakData_intensities_O[frag] = spec_tmp;
return peakData_O,peakData_O_false,peakData_intensities_O;
def normalize_peakSpectrum_normMax(self,peakSpectrum_I,scalingFactors_I):
'''normalize peakSpectrum taken from different m+0, m+1, ... fragments
using a reference scaling factor'''
# Input:
# peakSpectrum_I = {precursor_fragment:{product_fragment:{product_mass:intensity}}}
# scalingFactors_I = {precursor_fragment:intensity}
# Output:
# peakSpectrum_normalized = {product_fragment:{mass:intensity}}
'''Algorithm:
part 1: scale
for each precursor i:
for each product j in precursor i:
for each mass m in product j:
peakSpectrum[precursor_i][product_j][m]*scalingFactor[precursor_i]
part 2: reduce:
for each product j in all precursors:
for each mass in product j:
for each precursor i with product j:
peakSpectrum_O[product_j][m] += peakSpectrum[precursor_i][product_j][m]*scalingFactor[precursor_i]
'''
precursor_fragments_I = list(peakSpectrum_I.keys());
precursorSpectrum_dict = {};
product_fragments_all = [];
product_mass_all = [];
# iterate through each precursor fragment
for precursor in precursor_fragments_I:
product_fragments_I = list(peakSpectrum_I[precursor].keys());
productSpectrum_dict = {};
product_fragments_all.extend(product_fragments_I);
# iterate through each product fragment
for product in product_fragments_I:
spectrum_dict = {};
product_mass_dict = {};
product_mass_tmp = [];
# iterate through each mass
for k,v in peakSpectrum_I[precursor][product].items():
if peakSpectrum_I[precursor][product][k]:
spectrum_dict[k] = peakSpectrum_I[precursor][product][k]*scalingFactors_I[precursor];
else:
spectrum_dict[k] = 0.0;
product_mass_tmp.append(k);
productSpectrum_dict[product] = spectrum_dict;
product_mass_dict[product] = product_mass_tmp;
product_mass_all.append(product_mass_dict);
precursorSpectrum_dict[precursor] = productSpectrum_dict
# reduce product fragments list
product_fragments_reduced = list(set(product_fragments_all));
# reduce product masses
product_mass_combined = {};
product_mass_reduced = {};
for product in product_fragments_all:
product_mass_combined[product] = [];
for product_mass in product_mass_all:
if product in product_mass:
product_mass_combined[product].extend(product_mass[product]);
product_mass_reduced[product] = list(set(product_mass_combined[product]));
peakSpectrum_normalized_O = {};
# iterate through all common product fragments
for product in product_fragments_reduced:
peakSpectrum_normalized_O[product] = None;
peakSpectrum_normalized_tmp = {};
# iterate through each mass
for mass in product_mass_reduced[product]:
peakSpectrum_normalized_tmp[mass] = 0.0;
# iterate through each precursor
for precursor in precursor_fragments_I:
if product in precursorSpectrum_dict[precursor]:
if mass in precursorSpectrum_dict[precursor][product]:
peakSpectrum_normalized_tmp[mass] += precursorSpectrum_dict[precursor][product][mass]
else:
peakSpectrum_normalized_tmp[mass] += 0.0;
else: peakSpectrum_normalized_tmp[mass] += 0.0;
peakSpectrum_normalized_O[product] = peakSpectrum_normalized_tmp;
# re-normalize the spectrum to max-normalized spectrum
intensityListMax = {};
peakSpectrum_normalized_O_max = {};
for product,spec in peakSpectrum_normalized_O.items():
intensityList = [];
for mass,intensity in spec.items():
intensityList.append(intensity);
intensityListMax = max(intensityList);
fragmentSpectrum = {};
for mass,intensity in spec.items():
if intensityListMax != 0.0:
fragmentSpectrum[mass] = intensity/intensityListMax;
else:
fragmentSpectrum[mass] = 0.0;
peakSpectrum_normalized_O_max[product] = fragmentSpectrum;
return peakSpectrum_normalized_O_max
def calculate_fragmentSpectrumAccuracy(self, peakSpectrum_normalized_list_I):
'''calculate the accuracy from the normalized intensity
Method:
spectrum accuracy = mean(abs(measured,a-theoretical,a),...) for all masses of a in the spectrum
Input:
peakSpectrum_normalized_list_I = [{fragment:{mass:intensity}}]
Output:
peakSpectrum_accuracy_O = {fragment:float};
'''
fragments_I = list(peakSpectrum_normalized_list_I[0].keys());
peakSpectrum_theoretical = self.report_fragmentSpectrum_normMax(fragments_I,True);
peakSpectrum_accuracy_O = {};
for frag in fragments_I:
peakSpectrum_accuracy_O[frag] = None;
if not peakSpectrum_theoretical[frag]: continue; # no carbons in fragment
intensityList = [];
masses = [];
for peakSpectrum in peakSpectrum_normalized_list_I:
intensityDict = {};
peakSpectrumMasses = list(peakSpectrum_theoretical[frag].keys());
for mass in peakSpectrumMasses:
if frag in peakSpectrum and mass in peakSpectrum[frag] and peakSpectrum[frag][mass] > 0.0:
intensityDict[mass] = peakSpectrum[frag][mass];
else:
intensityDict[mass] = 0.0;
if not mass in masses: masses.append(mass);
intensityList.append(intensityDict);
## uncomment to only compare measured masses
#intensityDict = {};
#peakSpectrumMasses = peakSpectrum[frag].keys();
#for mass in peakSpectrumMasses:
# if peakSpectrum[frag][mass] > 0.0:
# intensityDict[mass] = peakSpectrum[frag][mass];
# if not mass in masses: masses.append(mass);
#intensityList.append(intensityDict);
accuracyLst = [];
for mass in masses:
data = [];
for intensity in intensityList:
if intensity[mass]>=0.0:data.append(intensity[mass]);
if data and peakSpectrum_theoretical[frag][mass]:
intensity_array = numpy.array(data);
accuracyLst.append(abs(intensity_array.mean() - peakSpectrum_theoretical[frag][mass]))
accuracyLstMean = None;
if accuracyLst:
accuracyLstMean = numpy.mean(accuracyLst);
peakSpectrum_accuracy_O[frag] = accuracyLstMean;
else: peakSpectrum_accuracy_O[frag] = None;
return peakSpectrum_accuracy_O;
def calculate_fragmentSpectrumAccuracy_normSum(self, peakSpectrum_normalized_list_I):
'''calculate the accuracy from the normalized intensity
Method:
spectrum accuracy = mean(abs(measured,a-theoretical,a),...) for all masses of a in the spectrum
Input:
peakSpectrum_normalized_list_I = [{fragment:{mass:intensity}}]
Output:
peakSpectrum_accuracy_O = {fragment:float};
'''
fragments_I = list(peakSpectrum_normalized_list_I[0].keys());
peakSpectrum_theoretical = self.report_fragmentSpectrum_normSum(fragments_I,True);
peakSpectrum_accuracy_O = {};
for frag in fragments_I:
peakSpectrum_accuracy_O[frag] = None;
if not peakSpectrum_theoretical[frag]: continue; # no carbons in fragment
intensityList = [];
masses = [];
for peakSpectrum in peakSpectrum_normalized_list_I:
intensityDict = {};
peakSpectrumMasses = list(peakSpectrum_theoretical[frag].keys());
for mass in peakSpectrumMasses:
if frag in peakSpectrum and mass in peakSpectrum[frag] and peakSpectrum[frag][mass] > 0.0:
intensityDict[mass] = peakSpectrum[frag][mass];
else:
intensityDict[mass] = 0.0;
if not mass in masses: masses.append(mass);
intensityList.append(intensityDict);
## uncomment to only compare measured masses
#intensityDict = {};
#peakSpectrumMasses = peakSpectrum[frag].keys();
#for mass in peakSpectrumMasses:
# if peakSpectrum[frag][mass] > 0.0:
# intensityDict[mass] = peakSpectrum[frag][mass];
# if not mass in masses: masses.append(mass);
#intensityList.append(intensityDict);
accuracyLst = [];
for mass in masses:
data = [];
for intensity in intensityList:
if intensity[mass]>=0.0:data.append(intensity[mass]);
if data and peakSpectrum_theoretical[frag][mass]:
intensity_array = numpy.array(data);
accuracyLst.append(abs(intensity_array.mean() - peakSpectrum_theoretical[frag][mass]))
accuracyLstMean = None;
if accuracyLst:
accuracyLstMean = numpy.mean(accuracyLst);
peakSpectrum_accuracy_O[frag] = accuracyLstMean;
else: peakSpectrum_accuracy_O[frag] = None;
return peakSpectrum_accuracy_O;
def make_CSourceMix(self,csources_I, composition_I):
'''Make a carbon source mix of a specified composition'''
# Input: (e.g. 80/20 1-13C/U-13C glc)
# csources_I = backbone of the csources [['[13C]HO','CH2O','CH2O','CH2O','CH2O','CH3O'],
# ['[13C]HO','[13C]H2O','[13C]H2O','[13C]H2O','[13C]H2O','[13C]H3O']]
# composition_I = composition csources [0.8,0.2]
# Output:
# emu_O = {strings of emu distribution: spectral list}
emu_O = {};
emu_all = [];
ncsources = len(csources_I)
for cs in csources_I:
emu_tmp = {};
emu_tmp = self.make_EMUDistributionAndCSpectra(cs)
emu_all.append(emu_tmp);
for k in list(emu_all[0].keys()):
spectra_tmp = [];
spectra_tmp = [0.0]*len(emu_all[0][k])
for i in range(ncsources):
for j in range(len(emu_all[i][k])):
spectra_tmp[j] += composition_I[i]*emu_all[i][k][j];
emu_O[k] = spectra_tmp;
return emu_O;
    def make_EMUDistributionAndCSpectra(self,csource_I):
        '''Make EMU distribution based on the carbon source.

        Enumerates contiguous-run carbon subsets of the backbone (encoded as
        binary strings, '1' = carbon included), builds the corresponding
        sub-fragment formula and its sum-normalized isotope spectrum.

        Input:
            csource_I = carbon backbone of the csource, one formula string per carbon
                e.g. 1-13C glc = ['[13C]HO','CH2','CH2','CH2','CH2','CH3O']
                     U-13C glc = ['[13C]HO','[13C]H2O','[13C]H2O','[13C]H2O','[13C]H2O','[13C]H3O']
                     glc = ['CHO','CH2O','CH2O','CH2O','CH2O','CH3O']
        Output:
            emu_O = {strings of emu distribution ('x' + binary mask): spectral list
                     (intensities sorted by ascending rounded mass)}
        '''
        nC = len(csource_I)
        emu_O = {};
        # pass 1: start from the all-'0' mask and walk single/shifting '1'
        # positions; each (j,c,i) triple yields one mask to evaluate
        emu_c = nC*'0'; #initialize
        emu_lst = list(emu_c);
        for j in range(nC):
            emu_lst[j] = '1'
            for c in range(j,nC):
                emu_lst_2 = copy.copy(emu_lst)
                emu_lst_2[j] = '0';
                emu_lst_2[c] = '1';
                emu_tmp = copy.copy(emu_lst_2);
                cfrag = [];  # NOTE(review): never used below
                for i in range(c,nC):
                    # slide the '1' from position c to position i (in-place
                    # mutation order matters: clear c, then set i)
                    emu_tmp[c] = '0';
                    emu_tmp[i] = '1';
                    emu_str = 'x' + ''.join(emu_tmp)
                    # assemble the sub-fragment formula from the selected carbons
                    dfrag = [csource_I[p] for p,n in enumerate(emu_tmp) if n=='1']
                    dfrag_tmp = ''.join(dfrag)
                    #if emu_str.find('0')==-1: #ignore the fully labeled fragment
                    #    continue;
                    spectrum_tmp = self.report_fragmentSpectrum_normSum([dfrag_tmp],round_mass=True)
                    # format from dict into a list (ascending mass order):
                    spectrum_tmp_lst = [];
                    spectrum_masses_lst = [];
                    for k,v in spectrum_tmp[dfrag_tmp].items():
                        spectrum_masses_lst.append(k);
                    spectrum_masses_lst.sort();
                    for k in spectrum_masses_lst:
                        spectrum_tmp_lst.append(spectrum_tmp[dfrag_tmp][k]);
                    emu_O[emu_str] = spectrum_tmp_lst;
        # pass 2: mirror of pass 1 starting from the all-'1' mask, sliding a
        # '0' instead (ranges stop at nC-1, so the last carbon stays set)
        emu_c = nC*'1'; #initialize
        emu_lst = list(emu_c);
        for j in range(nC-1):
            emu_lst[j] = '0'
            for c in range(j,nC-1):
                emu_lst_2 = copy.copy(emu_lst)
                emu_lst_2[j] = '1';
                emu_lst_2[c] = '0';
                emu_tmp = copy.copy(emu_lst_2);
                cfrag = [];  # NOTE(review): never used below
                for i in range(c,nC-1):
                    # slide the '0' from position c to position i
                    emu_tmp[c] = '1';
                    emu_tmp[i] = '0';
                    emu_str = 'x' + ''.join(emu_tmp)
                    dfrag = [csource_I[p] for p,n in enumerate(emu_tmp) if n=='1']
                    dfrag_tmp = ''.join(dfrag)
                    #if emu_str.find('0')==-1: #ignore the fully labeled fragment
                    #    continue;
                    spectrum_tmp = self.report_fragmentSpectrum_normSum([dfrag_tmp],round_mass=True)
                    # format from dict into a list (ascending mass order):
                    spectrum_tmp_lst = [];
                    spectrum_masses_lst = [];
                    for k,v in spectrum_tmp[dfrag_tmp].items():
                        spectrum_masses_lst.append(k);
                    spectrum_masses_lst.sort();
                    for k in spectrum_masses_lst:
                        spectrum_tmp_lst.append(spectrum_tmp[dfrag_tmp][k]);
                    emu_O[emu_str] = spectrum_tmp_lst;
        return emu_O;
def make_fragmentID(self,met_id_I,formula_I,mass_I):
"""Make a unique fragment ID"""
fragment_id_O = met_id_I + "_" + formula_I + "_" + str(mass_I);
return fragment_id_O;
def make_sampleFragmentID(self,sample_name_I,met_id_I,formula_I,mass_I):
"""Make a unique fragment ID"""
fragment_id_O = sample_name_I + "_" + met_id_I + "_" + formula_I + "_" + str(mass_I);
return fragment_id_O;
| 48.655172
| 165
| 0.547677
| 6,065
| 62,084
| 5.408904
| 0.058862
| 0.004268
| 0.011797
| 0.021125
| 0.826459
| 0.801463
| 0.770553
| 0.760524
| 0.750221
| 0.734614
| 0
| 0.01157
| 0.356839
| 62,084
| 1,276
| 166
| 48.655172
| 0.809992
| 0.179563
| 0
| 0.763842
| 0
| 0
| 0.010318
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021469
| false
| 0
| 0.00452
| 0.00113
| 0.049718
| 0.00452
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fe84cf3bb9f5ee0d3b96c29f3cd7fd01ebfbb074
| 1,157
|
py
|
Python
|
docker/sane-doc-reports/src/tests/library/elements/test_table.py
|
glicht/dockerfiles
|
7dff92792a7ec6ac1e04950a41927867af5f147c
|
[
"MIT"
] | 1
|
2020-06-16T16:32:42.000Z
|
2020-06-16T16:32:42.000Z
|
docker/sane-doc-reports/src/tests/library/elements/test_table.py
|
glicht/dockerfiles
|
7dff92792a7ec6ac1e04950a41927867af5f147c
|
[
"MIT"
] | 25
|
2018-12-24T22:40:45.000Z
|
2021-06-25T15:26:39.000Z
|
docker/sane-doc-reports/src/tests/library/elements/test_table.py
|
glicht/dockerfiles
|
7dff92792a7ec6ac1e04950a41927867af5f147c
|
[
"MIT"
] | 1
|
2019-02-06T06:39:57.000Z
|
2019-02-06T06:39:57.000Z
|
from docx.table import Table
from sane_doc_reports.populate.Report import Report
from tests import utils
from tests.utils import _transform
def test_table_in_report():
    # Render the basic table fixture into a report document.
    report = Report(*_transform('elements/table.json'))
    report.populate_report()
    document = report.document
    # The first block item of the document is the outer layout grid.
    rendered = next(utils.iter_block_items(document))
    assert isinstance(rendered, Table)
    assert len(rendered.columns) == 12
    assert len(rendered.rows) == 1
    # Check that there is indeed an image
    assert len(document.element.xpath('//w:tbl//w:tbl')) == 1
    # Check that it has the right amount of rows
    assert len(document.element.xpath('//w:tbl//w:tbl//w:t')) == 22
def test_table_widget_in_report():
    # BUGFIX: renamed from test_table_in_report -- this function previously
    # had the same name as the test defined above it, so it shadowed that
    # test at import time and pytest only collected/ran one of the two.
    report = Report(*_transform('elements/table_widget.json'))
    report.populate_report()
    d = report.document
    table = next(utils.iter_block_items(d))
    assert isinstance(table, Table)
    assert len(table.columns) == 12
    assert len(table.rows) == 3
    # Check that there is indeed an image
    assert len(d.element.xpath('//w:tbl//w:tbl')) == 1
    # Check that it has the right amount of rows
    assert len(d.element.xpath('//w:t[contains(text(), "Eve listens")]')) == 1
| 31.27027
| 78
| 0.6828
| 175
| 1,157
| 4.411429
| 0.314286
| 0.093264
| 0.072539
| 0.088083
| 0.801813
| 0.801813
| 0.801813
| 0.801813
| 0.801813
| 0.792746
| 0
| 0.01164
| 0.183233
| 1,157
| 37
| 78
| 31.27027
| 0.805291
| 0.135696
| 0
| 0.583333
| 0
| 0
| 0.130522
| 0.048193
| 0
| 0
| 0
| 0
| 0.416667
| 1
| 0.083333
| false
| 0
| 0.166667
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
22b1257f3cea1d8f7d42abd1b5ef5beb370610f7
| 132,745
|
py
|
Python
|
tests/adapters/switches/cisco_test.py
|
idjaw/netman
|
58ba898de6e450a24b4f1721ce274ad3e12f9d33
|
[
"Apache-2.0"
] | 1
|
2016-01-28T17:56:51.000Z
|
2016-01-28T17:56:51.000Z
|
tests/adapters/switches/cisco_test.py
|
idjaw/netman
|
58ba898de6e450a24b4f1721ce274ad3e12f9d33
|
[
"Apache-2.0"
] | 2
|
2021-12-13T20:55:50.000Z
|
2022-03-29T22:07:13.000Z
|
tests/adapters/switches/cisco_test.py
|
idjaw/netman
|
58ba898de6e450a24b4f1721ce274ad3e12f9d33
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
from flexmock import flexmock, flexmock_teardown
from hamcrest import assert_that, has_length, equal_to, is_, instance_of, none, empty
from netaddr import IPNetwork
from netaddr.ip import IPAddress
from netman.adapters.switches import cisco
from netman.adapters.switches.cisco import Cisco, parse_vlan_ranges
from netman.adapters.switches.util import SubShell
from netman.core.objects.access_groups import IN, OUT
from netman.core.objects.exceptions import IPNotAvailable, UnknownVlan, UnknownIP, UnknownAccessGroup, BadVlanNumber, \
BadVlanName, UnknownInterface, UnknownVrf, VlanVrfNotSet, IPAlreadySet, BadVrrpGroupNumber, \
BadVrrpPriorityNumber, VrrpDoesNotExistForVlan, VrrpAlreadyExistsForVlan, BadVrrpTimers, \
BadVrrpTracking, DhcpRelayServerAlreadyExists, UnknownDhcpRelayServer, VlanAlreadyExist, \
UnknownBond, InvalidAccessGroupName
from netman.core.objects.interface_states import OFF, ON
from netman.core.objects.port_modes import ACCESS, TRUNK, DYNAMIC
from netman.core.objects.switch_descriptor import SwitchDescriptor
from netman.core.objects.switch_transactional import FlowControlSwitch
from tests import ignore_deprecation_warnings
@ignore_deprecation_warnings
def test_factory():
    # cisco.factory wraps a Cisco switch in a FlowControlSwitch bound to
    # the given lock, carrying the descriptor fields through untouched.
    lock = mock.Mock()
    descriptor = SwitchDescriptor(hostname='hostname', model='cisco',
                                  username='username', password='password',
                                  port=22)
    switch = cisco.factory(descriptor, lock)
    assert_that(switch, instance_of(FlowControlSwitch))
    assert_that(switch.wrapped_switch, instance_of(Cisco))
    assert_that(switch.lock, is_(lock))
    for field, expected in [('hostname', 'hostname'), ('model', 'cisco'),
                            ('username', 'username'), ('password', 'password'),
                            ('port', 22)]:
        assert_that(getattr(switch.switch_descriptor, field), equal_to(expected))
class CiscoTest(unittest.TestCase):
    def setUp(self):
        """Build a Cisco switch whose ssh transport is a flexmock stub."""
        self.switch = Cisco(SwitchDescriptor(model='cisco', hostname="my.hostname"))
        SubShell.debug = True  # echo the mocked shell exchanges during tests
        self.mocked_ssh_client = flexmock()
        self.switch.ssh = self.mocked_ssh_client
    def tearDown(self):
        """Verify all flexmock expectations registered by the test."""
        flexmock_teardown()
def test_switch_has_a_logger_configured_with_the_switch_name(self):
assert_that(self.switch.logger.name, is_(Cisco.__module__ + ".my.hostname"))
    def test_get_vlans(self):
        # get_vlans() walks "show vlan brief" for the vlan list, then
        # "show ip interface" to find which vlans have an SVI, then reads each
        # SVI's running-config to fill in ips/ACLs/vrf/standby(HSRP)/helpers.
        self.mocked_ssh_client.should_receive("do").with_args("show vlan brief").once().ordered().and_return([
            "VLAN Name                             Status    Ports",
            "---- -------------------------------- --------- -------------------------------",
            "1    default                          active    Fa0/2, Fa0/3, Fa0/4",
            "2222 your-name-is-way-too-long-for-th active",
            "2500 no-ip                            active",
            "2998 VLAN2998                         active    Fa0/1",
            "3333 some-name                        active",
        ])
        # vlans 2222, 2500 and 2998 have an SVI; 2723 has no brief entry
        self.mocked_ssh_client.should_receive("do").with_args("show ip interface").once().ordered().and_return([
            "Vlan2222 is down, line protocol is down",
            "  Internet protocol processing disabled",
            "Vlan2500 is down, line protocol is down",
            "  Internet protocol processing disabled",
            "Vlan2723 is down, line protocol is down",
            "  Internet protocol processing disabled",
            "Vlan2998 is down, line protocol is down",
            "  Internet protocol processing disabled",
            "GigabitEthernet1/0/1 is up, line protocol is up",
            "  Inbound  access list is not set"
        ])
        self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 2222").once().ordered().and_return([
            "Building configuration...",
            "Current configuration : 41 bytes",
            "!",
            "interface Vlan2222",
            " no ip address",
            "end"
        ])
        self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 2500").once().ordered().and_return([
            "Building configuration...",
            "Current configuration : 41 bytes",
            "!",
            "interface Vlan2500",
            " ip access-group SHIZZLE in",
            " ip access-group WHIZZLE out",
            " ip vrf forwarding BLAH",
            "end"
        ])
        # vlan 2998 carries the full SVI configuration: vrf, three addresses,
        # an outbound ACL, a complete HSRP group 1 and two DHCP helpers
        self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 2998").once().ordered().and_return([
            "Building configuration...",
            "Current configuration : 41 bytes",
            "!",
            "interface Vlan2998",
            " ip vrf forwarding patate",
            " ip address 1.1.1.1 255.255.255.0",
            " ip address 2.1.1.1 255.255.255.0 secondary",
            " ip address 3.1.1.1 255.255.255.0 secondary",
            " ip access-group GAGA out",
            " standby 1 ip 1.1.1.2",
            " standby 1 ip 2.1.1.2 secondary",
            " standby 1 ip 3.1.1.2 secondary",
            " standby 1 timers 5 15",
            " standby 1 priority 110",
            " standby 1 preempt delay minimum 60",
            " standby 1 authentication VLAN2998",
            " standby 1 track 101 decrement 50",
            " ip helper-address 10.10.10.1",
            " ip helper-address 10.10.10.2",
            "end"
        ])
        vlan_list = self.switch.get_vlans()
        # order is not guaranteed by the switch; sort before asserting
        vlan_list = sorted(vlan_list, key=lambda x: x.number)
        assert_that(vlan_list, has_length(5))
        assert_that(vlan_list[0].number, equal_to(1))
        assert_that(vlan_list[0].name, equal_to("default"))
        assert_that(len(vlan_list[0].ips), equal_to(0))
        assert_that(vlan_list[1].number, equal_to(2222))
        assert_that(vlan_list[1].name, equal_to("your-name-is-way-too-long-for-th"))
        assert_that(vlan_list[1].vrf_forwarding, equal_to(None))
        assert_that(vlan_list[1].access_groups[IN], equal_to(None))
        assert_that(vlan_list[1].access_groups[OUT], equal_to(None))
        assert_that(len(vlan_list[1].ips), equal_to(0))
        assert_that(vlan_list[2].vrf_forwarding, equal_to("BLAH"))
        assert_that(vlan_list[2].access_groups[IN], equal_to("SHIZZLE"))
        assert_that(vlan_list[2].access_groups[OUT], equal_to("WHIZZLE"))
        v3 = vlan_list[3]
        assert_that(v3.number, equal_to(2998))
        # name "VLAN2998" is the auto-generated default, parsed as None
        assert_that(v3.name, equal_to(None))
        assert_that(v3.vrf_forwarding, equal_to("patate"))
        assert_that(v3.access_groups[IN], equal_to(None))
        assert_that(v3.access_groups[OUT], equal_to("GAGA"))
        assert_that(len(v3.ips), equal_to(3))
        assert_that(v3.icmp_redirects, equal_to(True))
        v3.ips = sorted(v3.ips, key=lambda ip: (ip.value, ip.prefixlen))
        assert_that(str(v3.ips[0].ip), equal_to('1.1.1.1'))
        assert_that(v3.ips[0].prefixlen, equal_to(24))
        assert_that(str(v3.ips[1].ip), equal_to('2.1.1.1'))
        assert_that(v3.ips[1].prefixlen, equal_to(24))
        assert_that(str(v3.ips[2].ip), equal_to('3.1.1.1'))
        assert_that(v3.ips[2].prefixlen, equal_to(24))
        v3_vrrp = v3.vrrp_groups[0]
        assert_that(len(v3_vrrp.ips), equal_to(3))
        assert_that(v3_vrrp.ips[0], equal_to(IPAddress('1.1.1.2')))
        assert_that(v3_vrrp.ips[1], equal_to(IPAddress('2.1.1.2')))
        assert_that(v3_vrrp.ips[2], equal_to(IPAddress('3.1.1.2')))
        assert_that(v3_vrrp.hello_interval, equal_to(5))
        assert_that(v3_vrrp.dead_interval, equal_to(15))
        assert_that(v3_vrrp.priority, equal_to(110))
        assert_that(v3_vrrp.track_id, equal_to('101'))
        assert_that(v3_vrrp.track_decrement, equal_to(50))
        assert_that(len(v3.dhcp_relay_servers), equal_to(2))
        assert_that(str(v3.dhcp_relay_servers[0]), equal_to('10.10.10.1'))
        assert_that(str(v3.dhcp_relay_servers[1]), equal_to('10.10.10.2'))
        assert_that(vlan_list[4].number, equal_to(3333))
        assert_that(vlan_list[4].name, equal_to("some-name"))
        assert_that(len(vlan_list[4].ips), equal_to(0))
        assert_that(vlan_list[4].access_groups[IN], equal_to(None))
        assert_that(vlan_list[4].access_groups[OUT], equal_to(None))
def test_get_vlan_with_no_interface(self):
self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 1750 | begin vlan").and_return([
"vlan 1750",
"end"
])
self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1750 | begin interface").once().ordered().and_return([
" ^"
"% Invalid input detected at '^' marker."
])
vlan = self.switch.get_vlan(1750)
assert_that(vlan.number, is_(1750))
assert_that(vlan.name, is_(None))
assert_that(vlan.access_groups[IN], is_(none()))
assert_that(vlan.access_groups[OUT], is_(none()))
assert_that(vlan.vrf_forwarding, is_(none()))
assert_that(vlan.ips, is_(empty()))
assert_that(vlan.vrrp_groups, is_(empty()))
assert_that(vlan.dhcp_relay_servers, is_(empty()))
assert_that(vlan.icmp_redirects, is_(True))
def test_get_vlan_with_an_empty_interface(self):
self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 1750 | begin vlan").and_return([
"vlan 1750",
" name Shizzle",
"end"
])
self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1750 | begin interface").once().ordered().and_return([
"interface Vlan1750",
" no ip address"
"end"
])
vlan = self.switch.get_vlan(1750)
assert_that(vlan.number, is_(1750))
assert_that(vlan.name, is_("Shizzle"))
assert_that(vlan.access_groups[IN], is_(none()))
assert_that(vlan.access_groups[OUT], is_(none()))
assert_that(vlan.vrf_forwarding, is_(none()))
assert_that(vlan.ips, is_(empty()))
assert_that(vlan.vrrp_groups, is_(empty()))
assert_that(vlan.dhcp_relay_servers, is_(empty()))
def test_get_vlan_with_a_full_interface(self):
self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 1750 | begin vlan").and_return([
"vlan 1750",
" name Shizzle",
"end"
])
self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1750 | begin interface").once().ordered().and_return([
"interface Vlan1750",
" ip vrf forwarding SHIZZLE",
" ip address 1.1.1.1 255.255.255.0",
" ip address 2.1.1.1 255.255.255.0 secondary",
" ip address 3.1.1.1 255.255.255.0 secondary",
" ip access-group ACL-IN in",
" ip access-group ACL-OUT out",
" standby 1 ip 1.1.1.2",
" standby 1 ip 2.1.1.2 secondary",
" standby 1 ip 3.1.1.2 secondary",
" standby 1 timers 5 15",
" standby 1 priority 110",
" standby 1 preempt delay minimum 60",
" standby 1 authentication VLAN2998",
" standby 1 track 101 decrement 50",
" ip helper-address 10.10.10.1",
" ip helper-address 10.10.10.2",
" no ip redirects"
"end"
])
vlan = self.switch.get_vlan(1750)
assert_that(vlan.number, is_(1750))
assert_that(vlan.name, is_("Shizzle"))
assert_that(vlan.access_groups[IN], is_("ACL-IN"))
assert_that(vlan.access_groups[OUT], is_("ACL-OUT"))
assert_that(vlan.vrf_forwarding, is_("SHIZZLE"))
assert_that(vlan.ips, has_length(3))
assert_that(vlan.icmp_redirects, is_(False))
vrrp_group = vlan.vrrp_groups[0]
assert_that(len(vrrp_group.ips), equal_to(3))
assert_that(vrrp_group.ips[0], equal_to(IPAddress('1.1.1.2')))
assert_that(vrrp_group.hello_interval, equal_to(5))
assert_that(vrrp_group.dead_interval, equal_to(15))
assert_that(vrrp_group.priority, equal_to(110))
assert_that(vrrp_group.track_id, equal_to('101'))
assert_that(vrrp_group.track_decrement, equal_to(50))
assert_that(len(vlan.dhcp_relay_servers), equal_to(2))
assert_that(str(vlan.dhcp_relay_servers[0]), equal_to('10.10.10.1'))
assert_that(str(vlan.dhcp_relay_servers[1]), equal_to('10.10.10.2'))
def test_get_vlan_unknown_interface_raises(self):
self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 1750 | begin vlan").once().ordered().and_return([
])
with self.assertRaises(UnknownVlan) as expect:
self.switch.get_vlan(1750)
assert_that(str(expect.exception), equal_to("Vlan 1750 not found"))
    def test_get_vlan_has_interface_filled(self):
        # When the vlan has no SVI ("invalid input" response), get_vlan falls
        # back to scanning the whole running-config interface section; the
        # vlan itself must still come back with its brief attributes.
        self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 900 | begin vlan").and_return([
            "vlan 900",
            " name Shizzle",
            "end"
        ])
        self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 900 | begin interface").once().ordered().and_return([
            " ^",
            "% Invalid input detected at '^' marker."
        ])
        # interfaces referencing vlan 900 as access, trunk-allowed (explicit
        # and via a range) and trunk-native
        self.mocked_ssh_client.should_receive("do").with_args("show running-config | begin interface").and_return([
            "interface FastEthernet0/16",
            " switchport access vlan 900",
            " switchport mode access",
            "!",
            "interface FastEthernet0/17",
            " switchport trunk allowed vlan 900,1000",
            " switchport access vlan 900",
            "!",
            "interface FastEthernet0/18",
            " switchport trunk allowed vlan 900,1000",
            " switchport mode trunk",
            "!",
            "interface FastEthernet0/19",
            " switchport access vlan 1100",
            " switchport trunk native vlan 2",
            " switchport trunk allowed vlan 900,1000",
            " switchport mode access",
            "!",
            "interface FastEthernet0/20",
            " switchport access vlan 1100",
            " switchport trunk native vlan 900",
            " switchport trunk allowed vlan 800,500",
            " switchport mode trunk",
            "!",
            "interface FastEthernet0/21",
            " switchport trunk allowed vlan 899-901",
            " switchport mode trunk",
            "!",
            "line con 0",
            "transport input ssh"
        ])
        vlan = self.switch.get_vlan(900)
        assert_that(vlan.number, is_(900))
        assert_that(vlan.name, is_("Shizzle"))
    def test_add_vlan(self):
        # vlan 2999 must not already exist; it is then created and named
        # inside a "configure terminal" session.
        self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 2999 | begin vlan").and_return([])
        self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
            "Enter configuration commands, one per line. End with CNTL/Z."
        ])
        self.mocked_ssh_client.should_receive("do").with_args("vlan 2999").and_return([]).once().ordered()
        self.mocked_ssh_client.should_receive("do").with_args("name Gertrude").and_return([]).once().ordered()
        # "exit" is expected twice: once out of the vlan context, once out of
        # configure mode
        self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
        self.switch.add_vlan(2999, name="Gertrude")
def test_add_vlan_refused_number(self):
    """A "Command rejected ... out of the range" reply to "vlan N" raises BadVlanNumber.

    Fix: the final "exit" expectation now stubs ``.and_return([])`` like every
    sibling test in this class; without it flexmock returns None for that call,
    which would break any code path that iterates the command's output lines.
    """
    self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 2999 | begin vlan").and_return([])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("vlan 2999").once().ordered().and_return([
        "Command rejected: Bad VLAN list - character #5 (EOL) delimits a VLAN",
        "number which is out of the range 1..4094."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("exit").once().ordered().and_return([])
    with self.assertRaises(BadVlanNumber) as expect:
        self.switch.add_vlan(2999, name="Gertrude")
    assert_that(str(expect.exception), equal_to("Vlan number is invalid"))
def test_add_vlan_refused_name(self):
    """An "Invalid input" reply to the "name" command raises BadVlanName."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 2999 | begin vlan").and_return([])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("vlan 2999").and_return([]).once().ordered()
    # IOS rejects the space in the name with its "^ marker" error output.
    self.mocked_ssh_client.should_receive("do").with_args("name Gertr dude").once().ordered().and_return([
        "name Gertr dude",
        " ^",
        "% Invalid input detected at '^' marker."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("exit").once().ordered().and_return([])
    self.mocked_ssh_client.should_receive("do").with_args("exit").once().ordered().and_return([])
    with self.assertRaises(BadVlanName) as expect:
        self.switch.add_vlan(2999, name="Gertr dude")
    assert_that(str(expect.exception), equal_to("Vlan name is invalid"))
def test_add_vlan_no_name(self):
    """Adding a vlan without a name issues no "name" command at all."""
    ssh = self.mocked_ssh_client
    ssh.should_receive("do").with_args("show running-config vlan 2999 | begin vlan").and_return([])
    ssh.should_receive("do").with_args("configure terminal").and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ]).once().ordered()
    ssh.should_receive("do").with_args("vlan 2999").once().ordered().and_return([])
    ssh.should_receive("do").with_args("exit").twice().ordered().ordered().and_return([])
    self.switch.add_vlan(2999)
def test_add_vlan_already_exist_fails(self):
    """add_vlan refuses to create a vlan that the running-config already shows."""
    ssh = self.mocked_ssh_client
    ssh.should_receive("do").with_args("show running-config vlan 2999 | begin vlan").and_return([
        "vlan 2999",
        "end"
    ])
    with self.assertRaises(VlanAlreadyExist) as raised:
        self.switch.add_vlan(2999)
    assert_that(str(raised.exception), equal_to("Vlan 2999 already exists"))
def test_remove_vlan_also_removes_associated_vlan_interface(self):
    """remove_vlan deletes both "interface vlan N" and the vlan itself, in that order."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 2999 | begin vlan").and_return([
        "vlan 2999",
        "end"]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("no interface vlan 2999").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no vlan 2999").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).once().ordered()
    self.switch.remove_vlan(2999)
def test_remove_vlan_invalid_vlan_raises(self):
    """Removing a vlan absent from the running-config raises UnknownVlan."""
    ssh = self.mocked_ssh_client
    ssh.should_receive("do").with_args("show running-config vlan 2999 | begin vlan").once().ordered().and_return([])
    with self.assertRaises(UnknownVlan) as raised:
        self.switch.remove_vlan(2999)
    assert_that(str(raised.exception), equal_to("Vlan 2999 not found"))
def test_remove_vlan_ignores_removing_interface_not_created(self):
    """An "Invalid input" reply to "no interface vlan" is tolerated; the vlan is still removed."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 2999 | begin vlan").and_return([
        "vlan 2999",
        "end"]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    # No SVI exists for this vlan, so IOS answers with its error marker.
    self.mocked_ssh_client.should_receive("do").with_args("no interface vlan 2999").once().ordered().and_return([
        " ^",
        "% Invalid input detected at '^' marker."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("no vlan 2999").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).once().ordered()
    self.switch.remove_vlan(2999)
def test_get_interface(self):
    """get_interface parses one interface's running-config into an interface object.

    In access mode the trunk settings are not reported: trunk_native_vlan is
    None and trunk_vlans is empty even though the config lines are present.
    """
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface FastEthernet0/2 | begin interface").once().ordered().and_return([
        "interface FastEthernet0/2",
        " switchport access vlan 100",
        " switchport trunk native vlan 200",
        " switchport trunk allowed vlan 300,302-304",
        " switchport mode access",
        "!",
        "end",
    ])
    interface = self.switch.get_interface('FastEthernet0/2')
    assert_that(interface.name, equal_to("FastEthernet0/2"))
    assert_that(interface.shutdown, equal_to(False))
    assert_that(interface.port_mode, equal_to(ACCESS))
    assert_that(interface.access_vlan, equal_to(100))
    assert_that(interface.trunk_native_vlan, equal_to(None))
    assert_that(interface.trunk_vlans, equal_to([]))
def test_get_nonexistent_interface_raises(self):
    """An IOS "Invalid input" reply to the interface lookup raises UnknownInterface."""
    ssh = self.mocked_ssh_client
    ssh.should_receive("do").with_args("show running-config interface SlowEthernet42/9999 | begin interface").and_return([
        " ^",
        "% Invalid input detected at '^' marker."
    ])
    with self.assertRaises(UnknownInterface) as raised:
        self.switch.get_interface("SlowEthernet42/9999")
    assert_that(str(raised.exception), equal_to("Unknown interface SlowEthernet42/9999"))
def test_get_interfaces(self):
    """get_interfaces parses the whole running-config; Vlan* interfaces are not returned.

    The fixture contains 5 physical/port interfaces plus two SVIs (Vlan722,
    Vlan2999); the unpacking below shows only 5 results come back.
    """
    self.mocked_ssh_client.should_receive("do").with_args("show running-config | begin interface").once().ordered().and_return([
        "interface FastEthernet0/1",
        "!",
        "interface FastEthernet0/2",
        " switchport access vlan 100",
        " switchport trunk native vlan 200",
        " switchport trunk allowed vlan 300,302-304",
        " switchport mode access",
        "!",
        "interface GigabitEthernet0/3",
        " switchport access vlan 100",
        " switchport trunk native vlan 200",
        " switchport trunk allowed vlan 300,302-304",
        " switchport mode trunk",
        " shutdown",
        "!",
        "interface GigabitEthernet0/4",
        " switchport access vlan 100",
        " switchport trunk native vlan 200",
        " switchport trunk allowed vlan 300,302-304",
        "!",
        "interface GigabitEthernet1/0/5",
        " switchport trunk allowed vlan 300,302-304",
        " switchport mode trunk",
        "!",
        "interface Vlan722",
        " description MANAGEMENT_VLAN",
        " ip address 172.19.234.11 255.255.255.224",
        " no ip route-cache",
        "!",
        # NOTE(review): the leading space before "interface Vlan2999" looks
        # unintentional -- confirm whether the fixture means to exercise a
        # tolerant parser or is simply a typo.
        " interface Vlan2999",
        " no ip address",
        " no ip route-cache",
        " shutdown",
        "!",
        "end",
    ])
    result = self.switch.get_interfaces()
    if1, if2, if3, if4, if5 = result
    # if1: no switchport lines at all -> dynamic mode, nothing configured.
    assert_that(if1.name, equal_to("FastEthernet0/1"))
    assert_that(if1.shutdown, equal_to(False))
    assert_that(if1.port_mode, equal_to(DYNAMIC))
    assert_that(if1.access_vlan, equal_to(None))
    assert_that(if1.trunk_native_vlan, equal_to(None))
    assert_that(if1.trunk_vlans, equal_to([]))
    # if2: access mode hides the trunk settings.
    assert_that(if2.name, equal_to("FastEthernet0/2"))
    assert_that(if2.shutdown, equal_to(False))
    assert_that(if2.port_mode, equal_to(ACCESS))
    assert_that(if2.access_vlan, equal_to(100))
    assert_that(if2.trunk_native_vlan, equal_to(None))
    assert_that(if2.trunk_vlans, equal_to([]))
    # if3: trunk mode hides the access vlan; ranges are expanded.
    assert_that(if3.name, equal_to("GigabitEthernet0/3"))
    assert_that(if3.shutdown, equal_to(True))
    assert_that(if3.port_mode, equal_to(TRUNK))
    assert_that(if3.access_vlan, equal_to(None))
    assert_that(if3.trunk_native_vlan, equal_to(200))
    assert_that(if3.trunk_vlans, equal_to([300, 302, 303, 304]))
    # if4: dynamic mode exposes both access and trunk settings.
    assert_that(if4.name, equal_to("GigabitEthernet0/4"))
    assert_that(if4.port_mode, equal_to(DYNAMIC))
    assert_that(if4.access_vlan, equal_to(100))
    assert_that(if4.trunk_native_vlan, equal_to(200))
    assert_that(if4.trunk_vlans, equal_to([300, 302, 303, 304]))
    assert_that(if5.name, equal_to("GigabitEthernet1/0/5"))
    assert_that(if5.trunk_native_vlan, equal_to(None))
    assert_that(if5.trunk_vlans, equal_to([300, 302, 303, 304]))
def test_parse_range(self):
    """parse_vlan_ranges expands a Cisco vlan-range string into vlan numbers.

    Fixes: the method was named ``parse_range_test``, so unittest's
    ``test*`` discovery never ran it.  Renamed to ``test_parse_range``.
    Additionally, the first expectation compared a list against a bare
    ``range`` object, which is never equal in Python 3 -- the expected
    value is now wrapped in ``list(...)``.
    """
    # None means "no restriction": every vlan id from 1 up to 4093.
    result = parse_vlan_ranges(None)
    assert_that(list(result), equal_to(list(range(1, 4094))))
    # The literal string "none" means an explicitly empty set.
    result = parse_vlan_ranges("none")
    assert_that(list(result), equal_to([]))
    result = parse_vlan_ranges("1")
    assert_that(list(result), equal_to([1]))
    result = parse_vlan_ranges("2-5")
    assert_that(list(result), equal_to([2, 3, 4, 5]))
    # Mixed single values and ranges, comma separated.
    result = parse_vlan_ranges("1,3-5,7")
    assert_that(list(result), equal_to([1, 3, 4, 5, 7]))
def test_set_access_vlan(self):
    """set_access_vlan verifies the vlan exists, then configures it on the interface."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 2999 | begin vlan").and_return([
        "vlan 2999",
        "end"]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface FastEthernet0/4").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("switchport access vlan 2999").and_return([]).once().ordered()
    # Two "exit" calls: interface config mode, then configure terminal.
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.set_access_vlan("FastEthernet0/4", vlan=2999)
def test_set_access_vlan_invalid_vlan_raises(self):
    """set_access_vlan aborts with UnknownVlan before touching the interface."""
    ssh = self.mocked_ssh_client
    ssh.should_receive("do").with_args("show running-config vlan 2999 | begin vlan").once().ordered().and_return([])
    with self.assertRaises(UnknownVlan) as raised:
        self.switch.set_access_vlan("FastEthernet0/4", vlan=2999)
    assert_that(str(raised.exception), equal_to("Vlan 2999 not found"))
def test_set_access_vlan_invalid_interface_raises(self):
    """An "Invalid input" reply when entering the interface raises UnknownInterface."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 2999 | begin vlan").and_return([
        "vlan 2999",
        "end"]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface SlowEthernet42/9999").and_return([
        " ^",
        "% Invalid input detected at '^' marker."
    ])
    # Only one "exit": config session is left, interface mode was never entered.
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).once().ordered()
    with self.assertRaises(UnknownInterface) as expect:
        self.switch.set_access_vlan("SlowEthernet42/9999", vlan=2999)
    assert_that(str(expect.exception), equal_to("Unknown interface SlowEthernet42/9999"))
def test_unset_interface_access_vlan(self):
    """unset_interface_access_vlan issues "no switchport access vlan" on the port."""
    ssh = self.mocked_ssh_client
    ssh.should_receive("do").with_args("configure terminal").and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ]).once().ordered()
    ssh.should_receive("do").with_args("interface FastEthernet0/4").once().ordered().and_return([])
    ssh.should_receive("do").with_args("no switchport access vlan").once().ordered().and_return([])
    ssh.should_receive("do").with_args("exit").twice().ordered().ordered().and_return([])
    self.switch.unset_interface_access_vlan("FastEthernet0/4")
def test_unset_interface_access_vlan_invalid_interface_raises(self):
    """Unknown interfaces make unset_interface_access_vlan raise UnknownInterface."""
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface SlowEthernet42/9999").and_return([
        " ^",
        "% Invalid input detected at '^' marker."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).once().ordered()
    with self.assertRaises(UnknownInterface) as expect:
        self.switch.unset_interface_access_vlan("SlowEthernet42/9999")
    assert_that(str(expect.exception), equal_to("Unknown interface SlowEthernet42/9999"))
def test_set_access_mode(self):
    """set_access_mode switches the port to access mode and clears trunk settings."""
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface FastEthernet0/4").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("switchport mode access").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no switchport trunk native vlan").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no switchport trunk allowed vlan").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.set_access_mode("FastEthernet0/4")
def test_set_access_mode_invalid_interface_raises(self):
    """Unknown interfaces make set_access_mode raise UnknownInterface."""
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface SlowEthernet42/9999").and_return([
        " ^",
        "% Invalid input detected at '^' marker."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).once().ordered()
    with self.assertRaises(UnknownInterface) as expect:
        self.switch.set_access_mode("SlowEthernet42/9999")
    assert_that(str(expect.exception), equal_to("Unknown interface SlowEthernet42/9999"))
def test_set_bond_access_mode(self):
    """set_bond_access_mode targets the Port-channel interface for the bond number."""
    ssh = self.mocked_ssh_client
    ssh.should_receive("do").with_args("configure terminal").and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ]).once().ordered()
    ssh.should_receive("do").with_args("interface Port-channel4").once().ordered().and_return([])
    ssh.should_receive("do").with_args("switchport mode access").once().ordered().and_return([])
    ssh.should_receive("do").with_args("no switchport trunk native vlan").once().ordered().and_return([])
    ssh.should_receive("do").with_args("no switchport trunk allowed vlan").once().ordered().and_return([])
    ssh.should_receive("do").with_args("exit").twice().ordered().ordered().and_return([])
    self.switch.set_bond_access_mode(4)
def test_set_bond_access_mode_invalid_interface_raises(self):
    """An unknown Port-channel makes set_bond_access_mode raise UnknownBond."""
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface Port-channel9999").and_return([
        " ^",
        "% Invalid input detected at '^' marker."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).once().ordered()
    with self.assertRaises(UnknownBond) as expect:
        self.switch.set_bond_access_mode(9999)
    assert_that(str(expect.exception), equal_to("Bond 9999 not found"))
def test_set_trunk_mode_initial(self):
    """On an unconfigured port, set_trunk_mode enables trunk mode and empties allowed vlans."""
    # Current config has no switchport mode line -> full trunk setup is issued.
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface FastEthernet0/4").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 156 bytes",
        "!",
        "interface FastEthernet0/4",
        "end"
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface FastEthernet0/4").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("switchport mode trunk").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("switchport trunk allowed vlan none").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no switchport access vlan").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.set_trunk_mode("FastEthernet0/4")
def test_set_trunk_mode_initial_invalid_interface_raises(self):
    """set_trunk_mode raises UnknownInterface when the config lookup fails."""
    ssh = self.mocked_ssh_client
    ssh.should_receive("do").with_args("show running-config interface SlowEthernet42/9999").and_return([
        " ^",
        "% Invalid input detected at '^' marker."
    ])
    with self.assertRaises(UnknownInterface) as raised:
        self.switch.set_trunk_mode("SlowEthernet42/9999")
    assert_that(str(raised.exception), equal_to("Unknown interface SlowEthernet42/9999"))
def test_set_trunk_mode_switching_mode(self):
    """Switching a port from access to trunk mode re-runs the full trunk setup."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface FastEthernet0/4").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 156 bytes",
        "!",
        "interface FastEthernet0/4",
        " switchport mode access",
        "end"
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface FastEthernet0/4").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("switchport mode trunk").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("switchport trunk allowed vlan none").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no switchport access vlan").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.set_trunk_mode("FastEthernet0/4")
def test_set_trunk_mode_idempotent(self):
    """Re-applying trunk mode must NOT reset the allowed vlan list of an existing trunk."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface FastEthernet0/4").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 156 bytes",
        "!",
        "interface FastEthernet0/4",
        " switchport trunk allowed vlan 2999",
        " switchport mode trunk",
        "end"
    ])
    # Explicit negative expectation: the destructive command is never sent.
    self.mocked_ssh_client.should_receive("do").with_args("switchport trunk allowed vlan none").never()
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface FastEthernet0/4").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("switchport mode trunk").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no switchport access vlan").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.set_trunk_mode("FastEthernet0/4")
def test_set_bond_trunk_mode_initial(self):
    """set_bond_trunk_mode applies the trunk setup to the bond's Port-channel."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface Port-channel4").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 156 bytes",
        "!",
        "interface Port-channel4",
        "end"
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface Port-channel4").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("switchport mode trunk").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("switchport trunk allowed vlan none").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no switchport access vlan").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.set_bond_trunk_mode(4)
def test_set_bond_trunk_mode_initial_invalid_interface_raises(self):
    """set_bond_trunk_mode raises UnknownBond when the Port-channel does not exist."""
    ssh = self.mocked_ssh_client
    ssh.should_receive("do").with_args("show running-config interface Port-channel9999").and_return([
        " ^",
        "% Invalid input detected at '^' marker."
    ])
    with self.assertRaises(UnknownBond) as raised:
        self.switch.set_bond_trunk_mode(9999)
    assert_that(str(raised.exception), equal_to("Bond 9999 not found"))
def test_set_bond_trunk_mode_switching_mode(self):
    """Switching a bond from access to trunk mode re-runs the full trunk setup."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface Port-channel4").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 156 bytes",
        "!",
        "interface Port-channel4",
        " switchport mode access",
        "end"
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface Port-channel4").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("switchport mode trunk").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("switchport trunk allowed vlan none").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no switchport access vlan").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.set_bond_trunk_mode(4)
def test_set_bond_trunk_mode_idempotent(self):
    """Re-applying trunk mode on an already-trunking bond keeps its allowed vlan list."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface Port-channel4").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 156 bytes",
        "!",
        "interface Port-channel4",
        " switchport trunk allowed vlan 2999",
        " switchport mode trunk",
        "end"
    ])
    # The destructive reset command must never be sent on an existing trunk.
    self.mocked_ssh_client.should_receive("do").with_args("switchport trunk allowed vlan none").never()
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface Port-channel4").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("switchport mode trunk").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no switchport access vlan").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.set_bond_trunk_mode(4)
def test_add_trunk_vlan(self):
    """add_trunk_vlan verifies the vlan exists, then adds it to the allowed list."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 2999 | begin vlan").and_return([
        "vlan 2999",
        "end"]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface FastEthernet0/4").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("switchport trunk allowed vlan add 2999").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.add_trunk_vlan("FastEthernet0/4", vlan=2999)
def test_add_trunk_vlan_invalid_vlan_raises(self):
    """add_trunk_vlan aborts with UnknownVlan before touching the interface."""
    ssh = self.mocked_ssh_client
    ssh.should_receive("do").with_args("show running-config vlan 2999 | begin vlan").once().ordered().and_return([])
    with self.assertRaises(UnknownVlan) as raised:
        self.switch.add_trunk_vlan("FastEthernet0/4", vlan=2999)
    assert_that(str(raised.exception), equal_to("Vlan 2999 not found"))
def test_add_trunk_vlan_invalid_interface_raises(self):
    """Unknown interfaces make add_trunk_vlan raise UnknownInterface."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 2999 | begin vlan").and_return([
        "vlan 2999",
        "end"]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface SlowEthernet42/9999").once().ordered().and_return([
        " ^",
        "% Invalid input detected at '^' marker."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).once().ordered()
    with self.assertRaises(UnknownInterface) as expect:
        self.switch.add_trunk_vlan("SlowEthernet42/9999", vlan=2999)
    assert_that(str(expect.exception), equal_to("Unknown interface SlowEthernet42/9999"))
def test_remove_trunk_vlan(self):
    """remove_trunk_vlan removes a vlan that the interface's allowed ranges include."""
    # 2999 falls inside the 2998-3000 range, so removal proceeds.
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface FastEthernet0/4 | begin interface").once().ordered().and_return([
        "interface FastEthernet0/4",
        " switchport access vlan 100",
        " switchport trunk native vlan 200",
        " switchport trunk allowed vlan 300,302-304,2998-3000",
        " switchport mode trunk",
        "end",
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface FastEthernet0/4").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("switchport trunk allowed vlan remove 2999").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.remove_trunk_vlan("FastEthernet0/4", vlan=2999)
def test_remove_trunk_vlan_invalid_vlan_raises(self):
    """Removing a vlan that is not in the interface's allowed list raises UnknownVlan."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface FastEthernet0/4 | begin interface").once().ordered().and_return([
        "interface FastEthernet0/4",
        " switchport access vlan 100",
        " switchport trunk native vlan 200",
        " switchport trunk allowed vlan 300,302-304",
        " switchport mode trunk",
        "end",
    ])
    with self.assertRaises(UnknownVlan) as expect:
        self.switch.remove_trunk_vlan("FastEthernet0/4", vlan=2999)
    assert_that(str(expect.exception), equal_to("Vlan 2999 not found"))
def test_remove_trunk_vlan_invalid_interface_raises(self):
    """remove_trunk_vlan raises UnknownInterface when the config lookup fails."""
    ssh = self.mocked_ssh_client
    ssh.should_receive("do").with_args("show running-config interface SlowEthernet42/9999 | begin interface").once().ordered().and_return([
        " ^",
        "% Invalid input detected at '^' marker."
    ])
    with self.assertRaises(UnknownInterface) as raised:
        self.switch.remove_trunk_vlan("SlowEthernet42/9999", vlan=2999)
    assert_that(str(raised.exception), equal_to("Unknown interface SlowEthernet42/9999"))
def test_remove_trunk_vlan_no_port_mode_still_working(self):
    """remove_trunk_vlan works even when no "switchport mode" line is configured."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface FastEthernet0/4 | begin interface").once().ordered().and_return([
        "interface FastEthernet0/4",
        " switchport access vlan 100",
        " switchport trunk native vlan 200",
        " switchport trunk allowed vlan 300,302-304",
        "end",
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface FastEthernet0/4").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("switchport trunk allowed vlan remove 303").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.remove_trunk_vlan("FastEthernet0/4", vlan=303)
def test_add_bond_trunk_vlan(self):
    """add_bond_trunk_vlan verifies the vlan, then adds it on the Port-channel."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 2999 | begin vlan").and_return([
        "vlan 2999",
        "end"]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface Port-channel4").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("switchport trunk allowed vlan add 2999").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.add_bond_trunk_vlan(4, vlan=2999)
def test_add_bond_trunk_vlan_invalid_vlan_raises(self):
    """add_bond_trunk_vlan aborts with UnknownVlan before touching the bond."""
    ssh = self.mocked_ssh_client
    ssh.should_receive("do").with_args("show running-config vlan 2999 | begin vlan").once().ordered().and_return([])
    with self.assertRaises(UnknownVlan) as raised:
        self.switch.add_bond_trunk_vlan(4, vlan=2999)
    assert_that(str(raised.exception), equal_to("Vlan 2999 not found"))
def test_add_bond_trunk_vlan_invalid_interface_raises(self):
    """An unknown Port-channel makes add_bond_trunk_vlan raise UnknownBond."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 2999 | begin vlan").and_return([
        "vlan 2999",
        "end"]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface Port-channel9999").once().ordered().and_return([
        " ^",
        "% Invalid input detected at '^' marker."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).once().ordered()
    with self.assertRaises(UnknownBond) as expect:
        self.switch.add_bond_trunk_vlan(9999, vlan=2999)
    assert_that(str(expect.exception), equal_to("Bond 9999 not found"))
def test_remove_bond_trunk_vlan(self):
    """remove_bond_trunk_vlan removes a vlan found in the bond's allowed ranges."""
    # 2999 falls inside the 2998-3000 range, so removal proceeds.
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface Port-channel4 | begin interface").once().ordered().and_return([
        "interface Port-channel4",
        " switchport access vlan 100",
        " switchport trunk native vlan 200",
        " switchport trunk allowed vlan 300,302-304,2998-3000",
        " switchport mode trunk",
        "end",
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface Port-channel4").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("switchport trunk allowed vlan remove 2999").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.remove_bond_trunk_vlan(4, vlan=2999)
def test_remove_bond_trunk_vlan_invalid_vlan_raises(self):
    """Removing a vlan absent from the bond's allowed list raises UnknownVlan."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface Port-channel4 | begin interface").once().ordered().and_return([
        "interface Port-channel4",
        " switchport access vlan 100",
        " switchport trunk native vlan 200",
        " switchport trunk allowed vlan 300,302-304",
        " switchport mode trunk",
        "end",
    ])
    with self.assertRaises(UnknownVlan) as expect:
        self.switch.remove_bond_trunk_vlan(4, vlan=2999)
    assert_that(str(expect.exception), equal_to("Vlan 2999 not found"))
def test_remove_bond_trunk_vlan_invalid_interface_raises(self):
    """remove_bond_trunk_vlan raises UnknownBond when the Port-channel lookup fails."""
    ssh = self.mocked_ssh_client
    ssh.should_receive("do").with_args("show running-config interface Port-channel9999 | begin interface").once().ordered().and_return([
        " ^",
        "% Invalid input detected at '^' marker."
    ])
    with self.assertRaises(UnknownBond) as raised:
        self.switch.remove_bond_trunk_vlan(9999, vlan=2999)
    assert_that(str(raised.exception), equal_to("Bond 9999 not found"))
def test_remove_bond_trunk_vlan_no_port_mode_still_working(self):
    """remove_bond_trunk_vlan works even when no "switchport mode" line is configured."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface Port-channel4 | begin interface").once().ordered().and_return([
        "interface Port-channel4",
        " switchport access vlan 100",
        " switchport trunk native vlan 200",
        " switchport trunk allowed vlan 300,302-304",
        "end",
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface Port-channel4").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("switchport trunk allowed vlan remove 303").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.remove_bond_trunk_vlan(4, vlan=303)
def test_set_interface_state_off(self):
    """set_interface_state(..., OFF) shuts the port down inside a config session."""
    ssh = self.mocked_ssh_client
    ssh.should_receive("do").with_args("configure terminal").and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ]).once().ordered()
    ssh.should_receive("do").with_args("interface FastEthernet0/4").once().ordered().and_return([])
    ssh.should_receive("do").with_args("shutdown").once().ordered().and_return([])
    ssh.should_receive("do").with_args("exit").twice().ordered().ordered().and_return([])
    self.switch.set_interface_state("FastEthernet0/4", OFF)
def test_set_interface_state_off_invalid_interface_raises(self):
    """set_interface_state(OFF) on an unknown interface raises UnknownInterface."""
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    # Entering the bogus interface yields the invalid-input marker.
    self.mocked_ssh_client.should_receive("do").with_args("interface SlowEthernet42/9999").once().ordered().and_return([
        " ^",
        "% Invalid input detected at '^' marker."
    ])
    # Only one "exit" here: config mode is left without entering the interface.
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).once().ordered()
    with self.assertRaises(UnknownInterface) as expect:
        self.switch.set_interface_state("SlowEthernet42/9999", OFF)
    assert_that(str(expect.exception), equal_to("Unknown interface SlowEthernet42/9999"))
def test_set_interface_state_on(self):
    """Turning an interface ON issues 'no shutdown' inside config mode."""
    ssh = self.mocked_ssh_client
    ssh.should_receive("do").with_args("configure terminal").and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ]).once().ordered()
    ssh.should_receive("do").with_args("interface FastEthernet0/4").once().ordered().and_return([])
    ssh.should_receive("do").with_args("no shutdown").once().ordered().and_return([])
    # One "exit" leaves the interface, a second leaves config mode.
    ssh.should_receive("do").with_args("exit").twice().ordered().ordered().and_return([])
    self.switch.set_interface_state("FastEthernet0/4", ON)
def test_set_interface_state_on_invalid_interface_raises(self):
    """set_interface_state(ON) on an unknown interface raises UnknownInterface."""
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface SlowEthernet42/9999").once().ordered().and_return([
        " ^",
        "% Invalid input detected at '^' marker."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).once().ordered()
    with self.assertRaises(UnknownInterface) as expect:
        self.switch.set_interface_state("SlowEthernet42/9999", ON)
    assert_that(str(expect.exception), equal_to("Unknown interface SlowEthernet42/9999"))
def test_set_interface_native_vlan_on_trunk(self):
    """set_interface_native_vlan verifies the vlan exists, then configures it as trunk native."""
    # Vlan existence check comes first.
    self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 2999 | begin vlan").and_return([
        "vlan 2999",
        "end"]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface FastEthernet0/4").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("switchport trunk native vlan 2999").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.set_interface_native_vlan("FastEthernet0/4", vlan=2999)
def test_set_interface_native_vlan_on_trunk_invalid_vlan_raises(self):
    """set_interface_native_vlan with a vlan the switch does not know raises UnknownVlan."""
    # Empty output from the vlan probe means the vlan does not exist;
    # no configuration commands are attempted afterwards.
    self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 2999 | begin vlan").and_return([
    ]).once().ordered()
    with self.assertRaises(UnknownVlan) as expect:
        self.switch.set_interface_native_vlan("FastEthernet0/4", vlan=2999)
    assert_that(str(expect.exception), equal_to("Vlan 2999 not found"))
def test_set_interface_native_vlan_on_trunk_invalid_interface_raises(self):
    """set_interface_native_vlan on an unknown interface raises UnknownInterface (vlan exists)."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 2999 | begin vlan").and_return([
        "vlan 2999",
        "end"]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface SlowEthernet42/9999").once().ordered().and_return([
        " ^",
        "% Invalid input detected at '^' marker."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).once().ordered()
    with self.assertRaises(UnknownInterface) as expect:
        self.switch.set_interface_native_vlan("SlowEthernet42/9999", vlan=2999)
    assert_that(str(expect.exception), equal_to("Unknown interface SlowEthernet42/9999"))
def test_unset_interface_native_vlan_on_trunk(self):
    """unset_interface_native_vlan issues 'no switchport trunk native vlan'."""
    ssh = self.mocked_ssh_client
    ssh.should_receive("do").with_args("configure terminal").and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ]).once().ordered()
    ssh.should_receive("do").with_args("interface FastEthernet0/4").once().ordered().and_return([])
    ssh.should_receive("do").with_args("no switchport trunk native vlan").once().ordered().and_return([])
    # One "exit" leaves the interface, a second leaves config mode.
    ssh.should_receive("do").with_args("exit").twice().ordered().ordered().and_return([])
    self.switch.unset_interface_native_vlan("FastEthernet0/4")
def test_unset_interface_native_vlan_on_trunk_invalid_interface_raises(self):
    """unset_interface_native_vlan on an unknown interface raises UnknownInterface."""
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface SlowEthernet42/9999").once().ordered().and_return([
        " ^",
        "% Invalid input detected at '^' marker."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).once().ordered()
    with self.assertRaises(UnknownInterface) as expect:
        self.switch.unset_interface_native_vlan("SlowEthernet42/9999")
    assert_that(str(expect.exception), equal_to("Unknown interface SlowEthernet42/9999"))
def test_set_bond_native_vlan_on_trunk(self):
    """set_bond_native_vlan verifies the vlan exists, then sets it on the Port-channel."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 2999 | begin vlan").and_return([
        "vlan 2999",
        "end"]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface Port-channel4").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("switchport trunk native vlan 2999").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.set_bond_native_vlan(4, vlan=2999)
def test_set_bond_native_vlan_on_trunk_invalid_vlan_raises(self):
    """set_bond_native_vlan with a vlan the switch does not know raises UnknownVlan."""
    # Empty probe output: vlan absent, so no config commands follow.
    self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 2999 | begin vlan").and_return([
    ]).once().ordered()
    with self.assertRaises(UnknownVlan) as expect:
        self.switch.set_bond_native_vlan(4, vlan=2999)
    assert_that(str(expect.exception), equal_to("Vlan 2999 not found"))
def test_set_bond_native_vlan_on_trunk_invalid_interface_raises(self):
    """set_bond_native_vlan on an unknown bond raises UnknownBond (vlan exists)."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 2999 | begin vlan").and_return([
        "vlan 2999",
        "end"]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface Port-channel9999").once().ordered().and_return([
        " ^",
        "% Invalid input detected at '^' marker."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).once().ordered()
    with self.assertRaises(UnknownBond) as expect:
        self.switch.set_bond_native_vlan(9999, vlan=2999)
    assert_that(str(expect.exception), equal_to("Bond 9999 not found"))
def test_unset_bond_native_vlan_on_trunk(self):
    """unset_bond_native_vlan issues 'no switchport trunk native vlan' on the Port-channel."""
    ssh = self.mocked_ssh_client
    ssh.should_receive("do").with_args("configure terminal").and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ]).once().ordered()
    ssh.should_receive("do").with_args("interface Port-channel4").once().ordered().and_return([])
    ssh.should_receive("do").with_args("no switchport trunk native vlan").once().ordered().and_return([])
    # One "exit" leaves the interface, a second leaves config mode.
    ssh.should_receive("do").with_args("exit").twice().ordered().ordered().and_return([])
    self.switch.unset_bond_native_vlan(4)
def test_unset_bond_native_vlan_on_trunk_invalid_interface_raises(self):
    """unset_bond_native_vlan on an unknown bond raises UnknownBond."""
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface Port-channel9999").once().ordered().and_return([
        " ^",
        "% Invalid input detected at '^' marker."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).once().ordered()
    with self.assertRaises(UnknownBond) as expect:
        self.switch.unset_bond_native_vlan(9999)
    assert_that(str(expect.exception), equal_to("Bond 9999 not found"))
def test_add_ip(self):
    """add_ip_to_vlan on an interface with no IP sets it as the primary address."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1234").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 41 bytes",
        "!",
        "interface Vlan1234",
        " no ip address",
        "end"
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface vlan 1234").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no shutdown").and_return([]).once().ordered()
    # No existing IP -> plain (primary) "ip address", no "secondary" keyword.
    self.mocked_ssh_client.should_receive("do").with_args("ip address 1.2.3.4 255.255.255.0").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.add_ip_to_vlan(1234, IPNetwork("1.2.3.4/24"))
def test_add_another_ip(self):
    """add_ip_to_vlan on an interface that already has IPs adds a secondary address."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1234").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 41 bytes",
        "!",
        "interface Vlan1234",
        " ip address 2.1.1.1 255.255.255.0 secondary",
        " ip address 1.2.1.1 255.255.255.128",
        " ip access-group SHIZZLE in",
        " ip access-group wHIZZLE out",
        "end"
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface vlan 1234").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no shutdown").and_return([]).once().ordered()
    # An additional IP also triggers "no ip redirects" and the "secondary" keyword.
    self.mocked_ssh_client.should_receive("do").with_args("no ip redirects").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("ip address 2.3.4.5 255.255.255.128 secondary").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.add_ip_to_vlan(1234, IPNetwork("2.3.4.5/25"))
def test_add_unavailable_ip_raises(self):
    """add_ip_to_vlan raises IPNotAvailable when the switch reports an overlap."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1234").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 41 bytes",
        "!",
        "interface Vlan1234",
        " no ip address",
        "end"
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface vlan 1234").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no shutdown").and_return([]).once().ordered()
    # The switch rejects the address; its error line is forwarded verbatim
    # into the IPNotAvailable message asserted below.
    self.mocked_ssh_client.should_receive("do").with_args("ip address 1.2.3.4 255.255.255.0").and_return([
        "% 2.1.1.128 overlaps with secondary address on Vlan2998"
    ]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    with self.assertRaises(IPNotAvailable) as expect:
        self.switch.add_ip_to_vlan(1234, IPNetwork("1.2.3.4/24"))
    assert_that(str(expect.exception), equal_to("IP 1.2.3.4/24 is not available in this vlan: % 2.1.1.128 overlaps with secondary address on Vlan2998"))
def test_add_unavailable_ip_because_secondary_elsewhere_raises(self):
    """add_ip_to_vlan raises IPNotAvailable when the IP is a secondary elsewhere."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1234").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 41 bytes",
        "!",
        "interface Vlan1234",
        " no ip address",
        "end"
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface vlan 1234").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no shutdown").and_return([]).once().ordered()
    # Different switch error wording than the overlap case, same exception type.
    self.mocked_ssh_client.should_receive("do").with_args("ip address 1.2.3.4 255.255.255.0").and_return([
        "% 2.1.1.128 is assigned as a secondary address on Vlan2998"
    ]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    with self.assertRaises(IPNotAvailable) as expect:
        self.switch.add_ip_to_vlan(1234, IPNetwork("1.2.3.4/24"))
    assert_that(str(expect.exception), equal_to("IP 1.2.3.4/24 is not available in this vlan: % 2.1.1.128 is assigned as a secondary address on Vlan2998"))
def test_add_an_ip_already_present_in_the_same_port_raises(self):
    """add_ip_to_vlan raises IPAlreadySet when the address already exists on the interface."""
    # The detection happens locally from the running-config; no config
    # commands are sent to the switch at all.
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1234").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 41 bytes",
        "!",
        "interface Vlan1234",
        " ip address 1.2.3.4 255.255.255.128",
        " ip access-group SHIZZLE in",
        " ip access-group wHIZZLE out",
        "end"
    ])
    with self.assertRaises(IPAlreadySet) as expect:
        self.switch.add_ip_to_vlan(1234, IPNetwork("1.2.3.4/24"))
    # Same address with a different mask (/24 vs configured /25) still counts.
    assert_that(str(expect.exception), equal_to("IP 1.2.3.4/24 is already present in this vlan as 1.2.3.4/25"))
def test_add_an_ip_already_present_in_the_same_port_secondary_raises(self):
    """add_ip_to_vlan raises IPAlreadySet when the address exists as a secondary."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1234").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 41 bytes",
        "!",
        "interface Vlan1234",
        " ip address 2.1.1.1 255.255.255.0 secondary",
        " ip address 1.2.3.4 255.255.255.128",
        " ip access-group SHIZZLE in",
        " ip access-group wHIZZLE out",
        "end"
    ])
    with self.assertRaises(IPAlreadySet) as expect:
        self.switch.add_ip_to_vlan(1234, IPNetwork("2.1.1.1/24"))
    assert_that(str(expect.exception), equal_to("IP 2.1.1.1/24 is already present in this vlan as 2.1.1.1/24"))
def test_add_ip_to_vlan_without_interface_creates_it(self):
    """add_ip_to_vlan creates the SVI when the vlan exists but has no interface yet."""
    # No interface configured: the probe returns the invalid-input marker...
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1234").once().ordered().and_return([
        " ^",
        "% Invalid input detected at '^' marker.",
    ])
    # ...but the vlan itself exists, so the interface gets created.
    self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 1234 | begin vlan").once().ordered().and_return([
        "vlan 1234",
        "end",
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface vlan 1234").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no shutdown").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("ip address 1.2.3.4 255.255.255.0").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.add_ip_to_vlan(1234, IPNetwork("1.2.3.4/24"))
def test_add_ip_to_unknown_vlan(self):
    """add_ip_to_vlan raises UnknownVlan when neither interface nor vlan exist."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1234").once().ordered().and_return([
        " ^",
        "% Invalid input detected at '^' marker.",
    ])
    # Empty vlan probe: the vlan itself is missing.
    self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 1234 | begin vlan").once().ordered().and_return([
    ])
    with self.assertRaises(UnknownVlan) as expect:
        self.switch.add_ip_to_vlan(1234, IPNetwork("1.2.3.4/24"))
    assert_that(str(expect.exception), equal_to("Vlan 1234 not found"))
def test_remove_lonely_ip(self):
    """remove_ip_from_vlan removes the only address with a plain 'no ip address'."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1234").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 41 bytes",
        "!",
        "interface Vlan1234",
        " ip address 1.2.3.4 255.255.255.128",
        "end"
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface vlan 1234").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no shutdown").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no ip address 1.2.3.4 255.255.255.128").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.remove_ip_from_vlan(1234, IPNetwork("1.2.3.4/25"))
def test_remove_secondary_ip(self):
    """remove_ip_from_vlan on a secondary address keeps the 'secondary' keyword."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1234").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 41 bytes",
        "!",
        "interface Vlan1234",
        " ip address 2.1.1.1 255.255.255.0 secondary",
        " ip address 1.2.3.4 255.255.255.128",
        "end"
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface vlan 1234").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no shutdown").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no ip address 2.1.1.1 255.255.255.0 secondary").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.remove_ip_from_vlan(1234, IPNetwork("2.1.1.1/24"))
def test_remove_a_primary_ip_that_have_secondary_ips(self):
    """Removing the primary IP promotes the first secondary to primary."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1234").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 41 bytes",
        "!",
        "interface Vlan1234",
        " ip address 2.1.1.1 255.255.255.0 secondary",
        " ip address 3.1.1.1 255.255.255.0 secondary",
        " ip address 1.2.3.4 255.255.255.128",
        "end"
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface vlan 1234").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no shutdown").and_return([]).once().ordered()
    # Instead of "no ip address", the first secondary (2.1.1.1) is re-issued
    # without the "secondary" keyword, which both promotes it and drops
    # the old primary in a single command.
    self.mocked_ssh_client.should_receive("do").with_args("ip address 2.1.1.1 255.255.255.0").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.remove_ip_from_vlan(1234, IPNetwork("1.2.3.4/25"))
def test_cant_remove_unknown_ip(self):
    """remove_ip_from_vlan raises UnknownIP for an address not on the interface."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1234").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 41 bytes",
        "!",
        "interface Vlan1234",
        " ip address 2.1.1.1 255.255.255.0 secondary",
        " ip address 3.1.1.1 255.255.255.0 secondary",
        " ip address 1.2.3.4 255.255.255.128",
        "end"
    ])
    with self.assertRaises(UnknownIP) as expect:
        self.switch.remove_ip_from_vlan(1234, IPNetwork("5.5.5.5/25"))
    assert_that(str(expect.exception), equal_to("IP 5.5.5.5/25 not found"))
def test_cant_remove_known_ip_with_wrong_netmask(self):
    """remove_ip_from_vlan requires an exact netmask match — /27 does not match the configured /25."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1234").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 41 bytes",
        "!",
        "interface Vlan1234",
        " ip address 2.1.1.1 255.255.255.0 secondary",
        " ip address 3.1.1.1 255.255.255.0 secondary",
        " ip address 1.2.3.4 255.255.255.128",
        "end"
    ])
    with self.assertRaises(UnknownIP) as expect:
        self.switch.remove_ip_from_vlan(1234, IPNetwork("1.2.3.4/27"))
    assert_that(str(expect.exception), equal_to("IP 1.2.3.4/27 not found"))
def test_remove_ip_from_known_vlan_with_no_interface(self):
    """remove_ip_from_vlan raises UnknownIP when the vlan exists but has no SVI."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1234").once().ordered().and_return([
        " ^",
        "% Invalid input detected at '^' marker.",
    ])
    # The vlan exists, so this is an UnknownIP case, not UnknownVlan.
    self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 1234 | begin vlan").once().ordered().and_return([
        "vlan 1234",
        "end",
    ])
    with self.assertRaises(UnknownIP) as expect:
        self.switch.remove_ip_from_vlan(1234, IPNetwork("1.2.3.4/24"))
    assert_that(str(expect.exception), equal_to("IP 1.2.3.4/24 not found"))
def test_remove_ip_from_unknown_vlan(self):
    """remove_ip_from_vlan raises UnknownVlan when neither interface nor vlan exist."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1234").once().ordered().and_return([
        " ^",
        "% Invalid input detected at '^' marker.",
    ])
    self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 1234 | begin vlan").once().ordered().and_return([
    ])
    with self.assertRaises(UnknownVlan) as expect:
        self.switch.remove_ip_from_vlan(1234, IPNetwork("1.2.3.4/24"))
    assert_that(str(expect.exception), equal_to("Vlan 1234 not found"))
def test_set_access_group_success(self):
    """set_vlan_access_group(IN) issues 'ip access-group <name> in' on the SVI."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 2500").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 41 bytes",
        "!",
        "interface Vlan2500",
        " no ip address",
        "end"
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface vlan 2500").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no shutdown").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("ip access-group TheAccessGroup in").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.set_vlan_access_group(2500, IN, "TheAccessGroup")
def test_set_access_group_incorrect_name(self):
    """set_vlan_access_group raises InvalidAccessGroupName when the switch rejects the name."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 2500").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 41 bytes",
        "!",
        "interface Vlan2500",
        " no ip address",
        "end"
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface vlan 2500").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no shutdown").and_return([]).once().ordered()
    # The embedded space in "TheAc cessGroup" makes the command invalid on
    # the switch side.
    self.mocked_ssh_client.should_receive("do").with_args("ip access-group TheAc cessGroup out").once().ordered().and_return([
        " ^",
        "% Invalid input detected at '^' marker."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    with self.assertRaises(InvalidAccessGroupName) as expect:
        self.switch.set_vlan_access_group(2500, OUT, "TheAc cessGroup")
    assert_that(str(expect.exception), equal_to("Access Group Name is invalid: TheAc cessGroup"))
def test_set_access_group_without_interface_creates_it(self):
    """set_vlan_access_group creates the SVI when the vlan exists but has no interface."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 2500").once().ordered().and_return([
        " ^",
        "% Invalid input detected at '^' marker.",
    ])
    self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 2500 | begin vlan").once().ordered().and_return([
        "vlan 2500",
        "end",
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface vlan 2500").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no shutdown").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("ip access-group TheAccessGroup in").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.set_vlan_access_group(2500, IN, "TheAccessGroup")
def test_set_access_group_unknown_vlan(self):
    """set_vlan_access_group raises UnknownVlan when neither SVI nor vlan exist."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 2500").once().ordered().and_return([
        " ^",
        "% Invalid input detected at '^' marker.",
    ])
    self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 2500 | begin vlan").once().ordered().and_return([
    ])
    with self.assertRaises(UnknownVlan) as expect:
        self.switch.set_vlan_access_group(2500, IN, "TheAccessGroup")
    assert_that(str(expect.exception), equal_to("Vlan 2500 not found"))
def test_remove_access_group_success(self):
    """unset_vlan_access_group(IN) issues 'no ip access-group in' (name not needed)."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 2500").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 41 bytes",
        "!",
        "interface Vlan2500",
        " no ip address",
        " ip access-group TheAccessGroup in",
        "end"
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface vlan 2500").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no shutdown").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no ip access-group in").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.unset_vlan_access_group(2500, IN)
def test_remove_access_group_success_out_also(self):
    """unset_vlan_access_group(OUT) issues 'no ip access-group out'."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 2500").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 41 bytes",
        "!",
        "interface Vlan2500",
        " no ip address",
        " ip access-group TheAccessGroup out",
        "end"
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface vlan 2500").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no shutdown").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no ip access-group out").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.unset_vlan_access_group(2500, OUT)
def test_remove_access_group_not_set(self):
    """unset_vlan_access_group raises UnknownAccessGroup for a direction not configured."""
    # Only an "in" group is present; removing "out" must fail without
    # sending any config commands.
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 2500").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 41 bytes",
        "!",
        "interface Vlan2500",
        " no ip address",
        " ip access-group TheAccessGroup in",
        "end"
    ])
    with self.assertRaises(UnknownAccessGroup) as expect:
        self.switch.unset_vlan_access_group(2500, OUT)
    assert_that(str(expect.exception), equal_to("Outgoing IP access group not found"))
def test_remove_access_group_from_known_vlan_with_no_interface(self):
    """unset_vlan_access_group raises UnknownAccessGroup when the vlan has no SVI."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 2500").once().ordered().and_return([
        " ^",
        "% Invalid input detected at '^' marker.",
    ])
    # The vlan exists, so the error is about the access group, not the vlan.
    self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 2500 | begin vlan").once().ordered().and_return([
        "vlan 2500",
        "end",
    ])
    with self.assertRaises(UnknownAccessGroup) as expect:
        self.switch.unset_vlan_access_group(2500, IN)
    assert_that(str(expect.exception), equal_to("Inbound IP access group not found"))
def test_remove_access_group_from_unknown_vlan(self):
    """unset_vlan_access_group raises UnknownVlan when neither SVI nor vlan exist."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 2500").once().ordered().and_return([
        " ^",
        "% Invalid input detected at '^' marker.",
    ])
    self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 2500 | begin vlan").once().ordered().and_return([
    ])
    with self.assertRaises(UnknownVlan) as expect:
        self.switch.unset_vlan_access_group(2500, IN)
    assert_that(str(expect.exception), equal_to("Vlan 2500 not found"))
def test_set_vlan_vrf_success(self):
    """set_vlan_vrf issues 'ip vrf forwarding <name>' on the vlan interface."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 2500").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 41 bytes",
        "!",
        "interface Vlan2500",
        " no ip address",
        "end"
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface vlan 2500").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no shutdown").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("ip vrf forwarding MYVRF").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.set_vlan_vrf(2500, "MYVRF")
def test_set_vlan_vrf_incorrect_name(self):
    """set_vlan_vrf raises UnknownVrf when the switch reports the VRF is not configured."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 2500").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 41 bytes",
        "!",
        "interface Vlan2500",
        " no ip address",
        "end"
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface vlan 2500").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no shutdown").and_return([]).once().ordered()
    # The command is accepted syntactically but the VRF does not exist.
    self.mocked_ssh_client.should_receive("do").with_args("ip vrf forwarding MYVRF").once().ordered().and_return([
        "% VRF MYVRF not configured."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    with self.assertRaises(UnknownVrf) as expect:
        self.switch.set_vlan_vrf(2500, "MYVRF")
    assert_that(str(expect.exception), equal_to("VRF name \"MYVRF\" was not configured."))
def test_set_vlan_vrf_without_interface_creates_it(self):
    """set_vlan_vrf creates the SVI when the vlan exists but has no interface yet."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 2500").once().ordered().and_return([
        " ^",
        "% Invalid input detected at '^' marker.",
    ])
    self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 2500 | begin vlan").once().ordered().and_return([
        "vlan 2500",
        "end",
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface vlan 2500").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no shutdown").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("ip vrf forwarding MYVRF").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.set_vlan_vrf(2500, "MYVRF")
def test_set_vlan_vrf_unknown_vlan(self):
    """set_vlan_vrf raises UnknownVlan when neither SVI nor vlan exist."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 2500").once().ordered().and_return([
        " ^",
        "% Invalid input detected at '^' marker.",
    ])
    self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 2500 | begin vlan").once().ordered().and_return([
    ])
    with self.assertRaises(UnknownVlan) as expect:
        self.switch.set_vlan_vrf(2500, "MYVRF")
    assert_that(str(expect.exception), equal_to("Vlan 2500 not found"))
def test_unset_vlan_vrf_success(self):
    """unset_vlan_vrf removes the VRF binding from an SVI that has one."""
    # Running config shows an existing "ip vrf forwarding" line on the SVI.
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 2500").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 41 bytes",
        "!",
        "interface Vlan2500",
        " no ip address",
        " ip vrf forwarding DEFAULT-LAN",
        "end"
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface vlan 2500").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no shutdown").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no ip vrf forwarding").and_return([]).once().ordered()
    # Two exits: interface config then configure terminal.
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.unset_vlan_vrf(2500)
def test_unset_vlan_vrf_not_set(self):
    """unset_vlan_vrf raises VlanVrfNotSet when the SVI has no VRF binding."""
    # SVI exists but its config contains no "ip vrf forwarding" line.
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 2500").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 41 bytes",
        "!",
        "interface Vlan2500",
        " no ip address",
        "end"
    ])
    with self.assertRaises(VlanVrfNotSet) as expect:
        self.switch.unset_vlan_vrf(2500)
    assert_that(str(expect.exception), equal_to("VRF is not set on vlan 2500"))
def test_unset_vlan_vrf_from_known_vlan_with_no_interface(self):
    """unset_vlan_vrf raises VlanVrfNotSet when the vlan exists but has no SVI."""
    # No SVI: interface query returns the IOS "Invalid input" marker.
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 2500").once().ordered().and_return([
        " ^",
        "% Invalid input detected at '^' marker.",
    ])
    # The vlan itself is configured, so the error is "no VRF", not "no vlan".
    self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 2500 | begin vlan").once().ordered().and_return([
        "vlan 2500",
        "end",
    ])
    with self.assertRaises(VlanVrfNotSet) as expect:
        self.switch.unset_vlan_vrf(2500)
    assert_that(str(expect.exception), equal_to("VRF is not set on vlan 2500"))
def test_unset_vlan_vrf_from_unknown_vlan(self):
    """unset_vlan_vrf raises UnknownVlan when neither the SVI nor the vlan exist."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 2500").once().ordered().and_return([
        " ^",
        "% Invalid input detected at '^' marker.",
    ])
    # Empty output -> vlan 2500 is not configured.
    self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 2500 | begin vlan").once().ordered().and_return([
    ])
    with self.assertRaises(UnknownVlan) as expect:
        self.switch.unset_vlan_vrf(2500)
    assert_that(str(expect.exception), equal_to("Vlan 2500 not found"))
@mock.patch("netman.adapters.switches.cisco.SshClient")
def test_connect(self, ssh_client_class_mock):
    """connect() opens SSH with the descriptor's port, enables, and sets terminal modes."""
    self.switch = Cisco(SwitchDescriptor(
        hostname="my.hostname", username="the_user", password="the_password", model="cisco", port=22))
    self.mocked_ssh_client = flexmock()
    ssh_client_class_mock.return_value = self.mocked_ssh_client
    # Prompt ends with ">" -> user mode, so an "enable" + password exchange is expected.
    self.mocked_ssh_client.should_receive("get_current_prompt").and_return("hostname>").once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("enable", wait_for=": ").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("the_password").and_return([]).once().ordered()
    # Disable paging and line wrapping so command output parses cleanly.
    self.mocked_ssh_client.should_receive("do").with_args("terminal length 0").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("terminal width 0").and_return([]).once().ordered()
    self.switch.connect()
    ssh_client_class_mock.assert_called_with(
        host="my.hostname",
        username="the_user",
        password="the_password",
        port=22
    )
@mock.patch("netman.adapters.switches.cisco.SshClient")
def test_connect_without_port_uses_default(self, ssh_client_class_mock):
    """connect() omits the port kwarg entirely when the descriptor has no port."""
    self.switch = Cisco(SwitchDescriptor(hostname="my.hostname", username="the_user", password="the_password", model="cisco"))
    self.mocked_ssh_client = flexmock()
    ssh_client_class_mock.return_value = self.mocked_ssh_client
    self.mocked_ssh_client.should_receive("get_current_prompt").and_return("hostname>").once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("enable", wait_for=": ").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("the_password").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("terminal length 0").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("terminal width 0").and_return([]).once().ordered()
    self.switch.connect()
    # No "port" keyword here: SshClient falls back to its own default.
    ssh_client_class_mock.assert_called_with(
        host="my.hostname",
        username="the_user",
        password="the_password"
    )
@mock.patch("netman.adapters.switches.cisco.SshClient")
def test_auto_enabled_switch_doesnt_require_enable(self, ssh_client_class_mock):
    """connect() skips the enable/password exchange when the prompt is already privileged."""
    self.switch = Cisco(SwitchDescriptor(hostname="my.hostname", username="the_user", password="the_password", model="cisco", port=8000))
    self.mocked_ssh_client = flexmock()
    ssh_client_class_mock.return_value = self.mocked_ssh_client
    # Prompt ends with "#" -> already in privileged EXEC mode.
    self.mocked_ssh_client.should_receive("get_current_prompt").and_return("hostname#").once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("enable", wait_for=": ").never()
    self.mocked_ssh_client.should_receive("do").with_args("terminal length 0").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("terminal width 0").and_return([]).once().ordered()
    self.switch.connect()
    ssh_client_class_mock.assert_called_with(
        host="my.hostname",
        username="the_user",
        password="the_password",
        port=8000
    )
def test_disconnect(self):
    """disconnect() quits the SSH session with "exit" and logs the full transcript."""
    logger = flexmock()
    self.switch.logger = logger
    logger.should_receive("debug")
    mocked_ssh_client = flexmock()
    self.switch.ssh = mocked_ssh_client
    mocked_ssh_client.should_receive("quit").with_args("exit").once().ordered()
    # The session transcript accumulated in ssh.full_log is emitted at info level.
    logger.should_receive("info").with_args("FULL TRANSACTION LOG").once()
    self.switch.ssh.full_log = "FULL TRANSACTION LOG"
    self.switch.disconnect()
def test_transactions_commit_write_memory(self):
    """commit_transaction() persists pending changes with "write memory"."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 2999 | begin vlan").and_return([])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("vlan 2999").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("name Gertrude").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).once().ordered()
    self.switch.start_transaction()
    self.switch.add_vlan(2999, name="Gertrude")
    # "write memory" must only be sent on commit, after the vlan was added.
    self.mocked_ssh_client.should_receive("do").with_args("write memory").once().ordered()
    self.switch.commit_transaction()
    self.switch.end_transaction()
def test_add_vrrp_success_single_ip(self):
    """add_vrrp_group issues the full HSRP "standby" command sequence for one IP."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1234").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 41 bytes",
        "!",
        "interface Vlan1234",
        " no ip address",
        "end"
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface vlan 1234").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no shutdown").and_return([]).once().ordered()
    # Timers/priority/preempt/auth/track are configured before the group IP.
    self.mocked_ssh_client.should_receive("do").with_args("standby 1 timers 5 15").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("standby 1 priority 110").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("standby 1 preempt delay minimum 60").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("standby 1 authentication VLAN1234").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("standby 1 track 101 decrement 50").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("standby 1 ip 1.2.3.4").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("1.2.3.4")], priority=110, hello_interval=5, dead_interval=15,
                               track_id=101, track_decrement=50)
def test_add_vrrp_success_multiple_ip(self):
    """add_vrrp_group marks every IP after the first as "secondary"."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1234").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 41 bytes",
        "!",
        "interface Vlan1234",
        " no ip address",
        "end"
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface vlan 1234").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no shutdown").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("standby 1 timers 5 15").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("standby 1 priority 110").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("standby 1 preempt delay minimum 60").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("standby 1 authentication VLAN1234").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("standby 1 track 101 decrement 50").and_return([]).once().ordered()
    # First IP is the group address; subsequent IPs get the "secondary" suffix.
    self.mocked_ssh_client.should_receive("do").with_args("standby 1 ip 1.2.3.4").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("standby 1 ip 5.6.7.8 secondary").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("1.2.3.4"), IPAddress("5.6.7.8")], priority=110,
                               hello_interval=5, dead_interval=15, track_id=101,
                               track_decrement=50)
def test_add_vrrp_from_unknown_vlan(self):
    """add_vrrp_group raises UnknownVlan when neither the SVI nor the vlan exist."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1234").once().ordered().and_return([
        " ^",
        "% Invalid input detected at '^' marker.",
    ])
    # Empty output -> vlan 1234 is not configured.
    self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 1234 | begin vlan").once().ordered().and_return([
    ])
    with self.assertRaises(UnknownVlan) as expect:
        self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("1.2.3.4")])
    assert_that(str(expect.exception), equal_to("Vlan 1234 not found"))
def test_add_existing_vrrp_to_same_vlan(self):
    """add_vrrp_group raises VrrpAlreadyExistsForVlan when the group id is taken."""
    # The SVI config already contains "standby 1" lines for the requested group.
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1234").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 41 bytes",
        "!",
        "interface Vlan1234",
        " no ip address",
        " standby 1 ip 5.6.7.8",
        " standby 1 priority 80",
        "end"
    ])
    with self.assertRaises(VrrpAlreadyExistsForVlan) as expect:
        self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("1.2.3.4")], priority=90)
    assert_that(str(expect.exception), equal_to("Vrrp group 1 is already in use on vlan 1234"))
def test_add_vrrp_to_vlan_with_another_vrrp(self):
    """add_vrrp_group accepts a new group id on an SVI that already has a different group."""
    # Existing config has group 1; the test adds group 2 alongside it.
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1234").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 41 bytes",
        "!",
        "interface Vlan1234",
        " no ip address",
        " standby 1 ip 5.6.7.8",
        " standby 1 priority 80",
        " standby 1 preempt delay minimum 60",
        "end"
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface vlan 1234").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no shutdown").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("standby 2 priority 90").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("standby 2 preempt delay minimum 60").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("standby 2 authentication VLAN1234").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("standby 2 ip 1.2.3.5").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.add_vrrp_group(1234, 2, ips=[IPAddress("1.2.3.5")], priority=90)
def test_add_vrrp_with_out_of_range_group_id(self):
    """add_vrrp_group rejects group id 0 locally, before any command is sent."""
    # No mock expectations: validation happens before touching the switch.
    with self.assertRaises(BadVrrpGroupNumber) as expect:
        self.switch.add_vrrp_group(1234, 0, ips=[IPAddress("1.2.3.4")], priority=255)
    assert_that(str(expect.exception), equal_to("VRRP group number is invalid, must be contained between 1 and 255"))
def test_add_vrrp_with_bad_priority(self):
    """add_vrrp_group raises BadVrrpPriorityNumber when IOS rejects the priority value."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1234").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 41 bytes",
        "!",
        "interface Vlan1234",
        " no ip address",
        "end"
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface vlan 1234").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no shutdown").and_return([]).once().ordered()
    # IOS rejects "priority 256" with the standard "Invalid input" marker output.
    # NOTE: a comma was missing after the marker line, which implicitly
    # concatenated it with the trailing "" into a single 2-element list; the
    # sibling tests (bad timers / bad tracking) all mock a 3-line response.
    self.mocked_ssh_client.should_receive("do").with_args("standby 1 priority 256").and_return([
        " ^",
        "% Invalid input detected at '^' marker.",
        ""
    ]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    with self.assertRaises(BadVrrpPriorityNumber) as expect:
        self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("1.2.3.4")], priority=256)
    assert_that(str(expect.exception), equal_to("VRRP priority value is invalid, must be contained between 1 and 255"))
def test_add_vrrp_with_bad_timers(self):
    """add_vrrp_group raises BadVrrpTimers when IOS rejects the timers command."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1234").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 41 bytes",
        "!",
        "interface Vlan1234",
        " no ip address",
        "end"
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface vlan 1234").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no shutdown").and_return([]).once().ordered()
    # Negative timer values trigger the IOS "Invalid input" marker response.
    self.mocked_ssh_client.should_receive("do").with_args("standby 1 timers -1 -1").and_return([
        " ^",
        "% Invalid input detected at '^' marker.",
        ""
    ]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    with self.assertRaises(BadVrrpTimers) as expect:
        self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("1.2.3.4")], hello_interval=-1, dead_interval=-1)
    assert_that(str(expect.exception), equal_to("VRRP timers values are invalid"))
def test_add_vrrp_with_bad_tracking(self):
    """add_vrrp_group raises BadVrrpTracking when IOS rejects the track command."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1234").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 41 bytes",
        "!",
        "interface Vlan1234",
        " no ip address",
        "end"
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface vlan 1234").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no shutdown").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("standby 1 preempt delay minimum 60").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("standby 1 authentication VLAN1234").and_return([]).once().ordered()
    # Non-numeric track id/decrement are passed through and rejected by IOS.
    self.mocked_ssh_client.should_receive("do").with_args("standby 1 track SOMETHING decrement VALUE").and_return([
        " ^",
        "% Invalid input detected at '^' marker.",
        ""
    ]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    with self.assertRaises(BadVrrpTracking) as expect:
        self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("1.2.3.4")], track_id='SOMETHING', track_decrement='VALUE')
    assert_that(str(expect.exception), equal_to("VRRP tracking values are invalid"))
def test_remove_vrrp_success(self):
    """remove_vrrp_group deletes the whole group with a single "no standby <id>"."""
    # SVI config shows a fully-populated standby group 1.
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1234").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 41 bytes",
        "!",
        "interface Vlan1234",
        " standby 1 ip 1.2.3.4",
        " standby 1 ip 5.6.7.8 secondary",
        " standby 1 timers 5 15",
        " standby 1 priority 110",
        " standby 1 preempt delay minimum 60",
        " standby 1 authentication VLAN1234",
        " standby 1 track 101 decrement 50",
        "end"
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface vlan 1234").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no shutdown").and_return([]).once().ordered()
    # One "no standby 1" removes every standby 1 sub-command at once.
    self.mocked_ssh_client.should_receive("do").with_args("no standby 1").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.remove_vrrp_group(1234, 1)
def test_remove_vrrp_with_invalid_group_id(self):
    """remove_vrrp_group raises VrrpDoesNotExistForVlan for a group absent from the SVI."""
    # SVI exists but carries no "standby 256" configuration.
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1234").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 41 bytes",
        "!",
        "interface Vlan1234",
        " no ip address",
        "end"
    ])
    with self.assertRaises(VrrpDoesNotExistForVlan) as expect:
        self.switch.remove_vrrp_group(1234, 256)
    assert_that(str(expect.exception), equal_to("Vrrp group 256 does not exist for vlan 1234"))
def test_remove_vrrp_from_unknown_vlan(self):
    """remove_vrrp_group raises UnknownVlan when neither the SVI nor the vlan exist."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1234").once().ordered().and_return([
        " ^",
        "% Invalid input detected at '^' marker.",
    ])
    # Empty output -> vlan 1234 is not configured.
    self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 1234 | begin vlan").once().ordered().and_return([
    ])
    with self.assertRaises(UnknownVlan) as expect:
        self.switch.remove_vrrp_group(1234, 1)
    assert_that(str(expect.exception), equal_to("Vlan 1234 not found"))
def test_add_dhcp_relay_server(self):
    """add_dhcp_relay_server configures an "ip helper-address" on the SVI."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1234").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 41 bytes",
        "!",
        "interface Vlan1234",
        " no ip address",
        "end"
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface vlan 1234").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no shutdown").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("ip helper-address 10.10.10.1").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.add_dhcp_relay_server(1234, IPAddress('10.10.10.1'))
def test_add_second_dhcp_relay_server(self):
    """add_dhcp_relay_server appends another helper-address next to an existing one."""
    # Config already holds helper-address .1; adding .2 must still succeed.
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1234").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 41 bytes",
        "!",
        "interface Vlan1234",
        " no ip address",
        " ip helper-address 10.10.10.1",
        "end"
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface vlan 1234").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no shutdown").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("ip helper-address 10.10.10.2").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.add_dhcp_relay_server(1234, IPAddress('10.10.10.2'))
def test_add_same_dhcp_relay_server_fails(self):
    """add_dhcp_relay_server raises DhcpRelayServerAlreadyExists on a duplicate address."""
    # The requested helper-address is already present in the SVI config.
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1234").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 41 bytes",
        "!",
        "interface Vlan1234",
        " no ip address",
        " ip helper-address 10.10.10.1",
        "end"
    ])
    with self.assertRaises(DhcpRelayServerAlreadyExists) as expect:
        self.switch.add_dhcp_relay_server(1234, IPAddress('10.10.10.1'))
    assert_that(str(expect.exception), equal_to("DHCP relay server 10.10.10.1 already exists on VLAN 1234"))
def test_remove_dhcp_relay_server(self):
    """remove_dhcp_relay_server issues "no ip helper-address" for an existing entry."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1234").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 41 bytes",
        "!",
        "interface Vlan1234",
        " no ip address",
        " ip helper-address 10.10.10.1",
        "end"
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface vlan 1234").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no shutdown").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no ip helper-address 10.10.10.1").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.remove_dhcp_relay_server(1234, IPAddress('10.10.10.1'))
def test_remove_non_existent_dhcp_relay_server_fails(self):
    """remove_dhcp_relay_server raises UnknownDhcpRelayServer when the address is absent."""
    # SVI config contains no helper-address at all.
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1234").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 41 bytes",
        "!",
        "interface Vlan1234",
        " no ip address",
        "end"
    ])
    with self.assertRaises(UnknownDhcpRelayServer) as expect:
        self.switch.remove_dhcp_relay_server(1234, IPAddress('10.10.10.1'))
    assert_that(str(expect.exception), equal_to("DHCP relay server 10.10.10.1 not found on VLAN 1234"))
def test_get_vlan_interfaces(self):
    """get_vlan_interfaces lists ports carrying the vlan as access, trunk-allowed, or trunk-native."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config | begin interface").and_return([
        # 0/16: access vlan 2222 -> included.
        "interface FastEthernet0/16",
        " switchport access vlan 2222",
        " switchport mode access",
        "!",
        # 0/17: access vlan 2222 (trunk-allowed line present but no trunk mode) -> included.
        "interface FastEthernet0/17",
        " switchport trunk allowed vlan 2222,2998",
        " switchport access vlan 2222",
        "!",
        # 0/18: trunk mode with 2222 allowed -> included.
        "interface FastEthernet0/18",
        " switchport trunk allowed vlan 2222,2998",
        " switchport mode trunk",
        "!",
        # 0/19: access mode on vlan 1100; trunk lines are inactive -> excluded.
        "interface FastEthernet0/19",
        " switchport access vlan 1100",
        " switchport trunk native vlan 2",
        " switchport trunk allowed vlan 2222,2998",
        " switchport mode access",
        "!",
        # 0/20: trunk mode with native vlan 2222 -> included.
        "interface FastEthernet0/20",
        " switchport access vlan 1100",
        " switchport trunk native vlan 2222",
        " switchport trunk allowed vlan 800,500",
        " switchport mode trunk",
        "!",
        "line con 0",
        "transport input ssh"
    ])
    vlan_interfaces = self.switch.get_vlan_interfaces(2222)
    assert_that(vlan_interfaces, is_(['FastEthernet0/16', 'FastEthernet0/17', 'FastEthernet0/18', 'FastEthernet0/20']))
def test_get_vlan_interfaces_unknown_vlan_raises(self):
    """get_vlan_interfaces raises UnknownVlan when no port uses the vlan and it is unconfigured."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config | begin interface").and_return([
        "interface FastEthernet0/16",
        " switchport access vlan 2222",
        " switchport mode access",
        "!",
        "interface FastEthernet0/17",
        " switchport trunk allowed vlan 2222,2998",
        " switchport access vlan 2222",
        "!",
        "interface FastEthernet0/18",
        " switchport trunk allowed vlan 2222,2998",
        " switchport mode trunk",
        "!",
        "interface FastEthernet0/19",
        " switchport access vlan 1100",
        " switchport trunk native vlan 2",
        " switchport trunk allowed vlan 2222,2998",
        " switchport mode access",
        "!",
        "interface FastEthernet0/20",
        " switchport access vlan 1100",
        " switchport trunk native vlan 2222",
        " switchport trunk allowed vlan 800,500",
        " switchport mode trunk",
        "!",
        "line con 0",
        "transport input ssh"
    ])
    # Vlan 1111 appears on no interface; the follow-up vlan query is empty too.
    self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 1111 | begin vlan").once().ordered().and_return([
    ])
    with self.assertRaises(UnknownVlan) as expect:
        self.switch.get_vlan_interfaces(1111)
    assert_that(str(expect.exception), equal_to("Vlan 1111 not found"))
def test_set_vlan_icmp_redirects_state_enable(self):
    """set_vlan_icmp_redirects_state(True) sends "ip redirects" on the SVI."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1234").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 41 bytes",
        "!",
        "interface Vlan1234",
        " no ip address",
        "end"
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface vlan 1234").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no shutdown").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("ip redirects").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.set_vlan_icmp_redirects_state(1234, True)
def test_set_vlan_icmp_redirects_state_disable(self):
    """set_vlan_icmp_redirects_state(False) sends "no ip redirects" on the SVI."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1234").once().ordered().and_return([
        "Building configuration...",
        "Current configuration : 41 bytes",
        "!",
        "interface Vlan1234",
        " no ip address",
        "end"
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface vlan 1234").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no shutdown").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no ip redirects").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.set_vlan_icmp_redirects_state(1234, False)
def test_set_vlan_icmp_redirects_state_without_interface_creates_it(self):
    """set_vlan_icmp_redirects_state creates the SVI when only the vlan exists."""
    # Interface query fails -> no SVI yet; vlan query confirms the vlan exists.
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1234").once().ordered().and_return([
        " ^",
        "% Invalid input detected at '^' marker.",
    ])
    self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 1234 | begin vlan").once().ordered().and_return([
        "vlan 1234",
        "end",
    ])
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    self.mocked_ssh_client.should_receive("do").with_args("interface vlan 1234").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no shutdown").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("no ip redirects").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
    self.switch.set_vlan_icmp_redirects_state(1234, False)
def test_set_vlan_icmp_redirects_state_unknown_vlan(self):
    """set_vlan_icmp_redirects_state raises UnknownVlan when the vlan is unconfigured."""
    self.mocked_ssh_client.should_receive("do").with_args("show running-config interface vlan 1234").once().ordered().and_return([
        " ^",
        "% Invalid input detected at '^' marker.",
    ])
    # Empty output -> vlan 1234 is not configured.
    self.mocked_ssh_client.should_receive("do").with_args("show running-config vlan 1234 | begin vlan").once().ordered().and_return([
    ])
    with self.assertRaises(UnknownVlan) as expect:
        self.switch.set_vlan_icmp_redirects_state(1234, False)
    assert_that(str(expect.exception), equal_to("Vlan 1234 not found"))
def test_get_versions_success(self):
    """get_versions parses "show version" into per-unit rows plus hardware key/value pairs."""
    self.mocked_ssh_client.should_receive("do").with_args("show version").once().ordered().and_return([
        "Cisco IOS Software, C3750 Software (C3750-IPSERVICESK9-M), Version 12.2(58)SE2, RELEASE SOFTWARE (fc1)",
        "Technical Support: http://www.cisco.com/techsupport",
        "Copyright (c) 1986-2011 by Cisco Systems, Inc.",
        "Compiled Thu 21-Jul-11 01:53 by prod_rel_team",
        "ROM: Bootstrap program is C3750 boot loader",
        "BOOTLDR: C3750 Boot Loader (C3750-HBOOT-M) Version 12.2(44)SE5, RELEASE SOFTWARE (fc1)",
        "my-switch uptime is 1 year, 18 weeks, 5 days, 1 hour, 11 minutes",
        "System returned to ROM by power-on",
        "System image file is \"flash:c3750-ipservicesk9-mz.122-58.SE2.bin\"",
        "This product contains cryptographic features and is subject to United",
        "States and local country laws governing import, export, transfer and",
        "use. Delivery of Cisco cryptographic products does not imply",
        "third-party authority to import, export, distribute or use encryption.",
        "Importers, exporters, distributors and users are responsible for",
        "compliance with U.S. and local country laws. By using this product you",
        "agree to comply with applicable laws and regulations. If you are unable",
        "to comply with U.S. and local laws, return this product immediately.",
        "A summary of U.S. laws governing Cisco cryptographic products may be found at:",
        "http://www.cisco.com/wwl/export/crypto/tool/stqrg.html",
        "If you require further assistance please contact us by sending email to",
        "export@cisco.com.",
        "cisco WS-C3750G-24TS-1U (PowerPC405) processor (revision H0) with 131072K bytes of memory.",
        "Processor board ID FOC1530X2F7",
        "Last reset from power-on",
        "80 Virtual Ethernet interfaces",
        "28 Gigabit Ethernet interfaces",
        "The password-recovery mechanism is enabled.",
        "512K bytes of flash-simulated non-volatile configuration memory.",
        # "Key : value" lines below become top-level entries of the result dict.
        "Base ethernet MAC Address : 00:00:00:00:00:00",
        "Motherboard assembly number : 73-10219-09",
        "Power supply part number : 341-0098-02",
        "Motherboard serial number : FOC153019Z6",
        "Power supply serial number : ALD153000BB",
        "Model revision number : H0",
        "Motherboard revision number : A0",
        "Model number : WS-C3750G-24TS-S1U",
        "System serial number : FOC1530X2F7",
        "Top Assembly Part Number : 800-26859-03",
        "Top Assembly Revision Number : C0",
        "Version ID : V05",
        "CLEI Code Number : COMB600BRA",
        "Hardware Board Revision Number : 0x09",
        # Stack-member table: one "units" entry per "* <n> ..." row.
        "Switch Ports Model SW Version SW Image",
        "------ ----- ----- ---------- ----------",
        "* 1 28 WS-C3750G-24TS-1U 12.2(58)SE2 C3750-IPSERVICESK9-M",
        "* 2 28 WS-C3750G-24TS-1U 12.2(58)SE2 C3750-IPSERVICESK9-M",
        "Configuration register is 0xF",
    ])
    versions = self.switch.get_versions()
    assert_that(versions, equal_to({
        "units" : {
            "1": {
                "Ports": "28",
                "Model": "WS-C3750G-24TS-1U",
                "SW Version": "12.2(58)SE2",
                "SW Image": "C3750-IPSERVICESK9-M"
            },
            "2": {
                "Ports": "28",
                "Model": "WS-C3750G-24TS-1U",
                "SW Version": "12.2(58)SE2",
                "SW Image": "C3750-IPSERVICESK9-M"
            }
        },
        "Base ethernet MAC Address": "00:00:00:00:00:00",
        "Motherboard assembly number": "73-10219-09",
        "Power supply part number": "341-0098-02",
        "Motherboard serial number": "FOC153019Z6",
        "Power supply serial number": "ALD153000BB",
        "Model revision number": "H0",
        "Motherboard revision number": "A0",
        "Model number": "WS-C3750G-24TS-S1U",
        "System serial number": "FOC1530X2F7",
        "Top Assembly Part Number": "800-26859-03",
        "Top Assembly Revision Number": "C0",
        "Version ID": "V05",
        "CLEI Code Number": "COMB600BRA",
        "Hardware Board Revision Number": "0x09",
    }))
def test_reset_interface(self):
    """reset_interface issues `default interface <name>` inside config mode.

    Expects the exact ordered command sequence:
    configure terminal -> default interface -> exit.
    """
    # Entering configuration mode echoes the standard IOS banner line.
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    # A successful `default interface` produces no output.
    self.mocked_ssh_client.should_receive("do").with_args("default interface FastEthernet0/4").and_return([]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).once().ordered()

    self.switch.reset_interface("FastEthernet0/4")
def test_reset_interface_unknown_interface_name_raises(self):
    """An invalid interface name must surface as UnknownInterface."""
    self.mocked_ssh_client.should_receive("do").with_args("configure terminal").once().ordered().and_return([
        "Enter configuration commands, one per line. End with CNTL/Z."
    ])
    # Fix: the device emits TWO output lines (the caret marker line and the
    # error line). The original list was missing the comma between them, so
    # implicit string concatenation silently merged them into one element.
    self.mocked_ssh_client.should_receive("do").with_args("default interface WrongInterfaceName0/4").and_return([
        " ^",
        "% Invalid input detected at '^' marker."
    ]).once().ordered()
    self.mocked_ssh_client.should_receive("do").with_args("exit").and_return([]).once().ordered()

    with self.assertRaises(UnknownInterface) as expect:
        self.switch.reset_interface("WrongInterfaceName0/4")

    assert_that(str(expect.exception), equal_to("Unknown interface WrongInterfaceName0/4"))
| 52.241244
| 162
| 0.64373
| 16,517
| 132,745
| 4.92868
| 0.036811
| 0.055941
| 0.091577
| 0.115297
| 0.908902
| 0.89152
| 0.880035
| 0.863329
| 0.847605
| 0.837508
| 0
| 0.03911
| 0.215466
| 132,745
| 2,540
| 163
| 52.261811
| 0.742575
| 0.004113
| 0
| 0.730144
| 0
| 0.004306
| 0.289155
| 0.002837
| 0
| 0
| 0.000083
| 0
| 0.121053
| 1
| 0.062201
| false
| 0.005263
| 0.009091
| 0
| 0.07177
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
22e0b7187d2d1047f26c52af76b6c87b2bbe72a3
| 3,151
|
py
|
Python
|
2021/11/code.py
|
ErikBavenstrand/Advent-of-Code
|
d4879dfb8d70d817cf57ab6a601f22e91d5ed8e1
|
[
"MIT"
] | null | null | null |
2021/11/code.py
|
ErikBavenstrand/Advent-of-Code
|
d4879dfb8d70d817cf57ab6a601f22e91d5ed8e1
|
[
"MIT"
] | null | null | null |
2021/11/code.py
|
ErikBavenstrand/Advent-of-Code
|
d4879dfb8d70d817cf57ab6a601f22e91d5ed8e1
|
[
"MIT"
] | null | null | null |
# Advent of Code 2021 Day 11
# Author: Erik Båvenstrand
# URL: https://adventofcode.com/2021/day/11
import numpy as np
def part_a(data: list[str]):
grid = np.empty((len(data), len(data)), int)
for i, line in enumerate(data):
grid[i] = list(line)
flashes = 0
for day in range(100):
grid += 1
chaining = True
flashed = set()
while chaining:
rows, cols = np.where(grid > 9)
for i, (r, c) in enumerate(zip(rows, cols)):
if (r, c) in flashed:
rows[i] = -1
cols[i] = -1
flashed.add((r, c))
if np.sum(rows > -1) == 0:
chaining = False
for r, c in zip(rows, cols):
if r == -1 and c == -1:
continue
if r > 0:
grid[r-1, c] += 1
if r < len(data) - 1:
grid[r+1, c] += 1
if c > 0:
grid[r, c-1] += 1
if c < len(data) - 1:
grid[r, c+1] += 1
if r > 0 and c > 0:
grid[r-1, c-1] += 1
if r < len(data) - 1 and c < len(data) - 1:
grid[r+1, c+1] += 1
if c > 0 and r < len(data) - 1:
grid[r+1, c-1] += 1
if c < len(data) - 1 and r > 0:
grid[r-1, c+1] += 1
coords = list(flashed)
for row, col in coords:
grid[row, col] = 0
flashes += len(coords)
return flashes
def part_b(data: list[str]):
grid = np.empty((len(data), len(data)), int)
for i, line in enumerate(data):
grid[i] = list(line)
mega_day = 0
day = 0
while mega_day == 0:
grid += 1
chaining = True
flashed = set()
while chaining:
rows, cols = np.where(grid > 9)
for i, (r, c) in enumerate(zip(rows, cols)):
if (r, c) in flashed:
rows[i] = -1
cols[i] = -1
flashed.add((r, c))
if np.sum(rows > -1) == 0:
chaining = False
for r, c in zip(rows, cols):
if r == -1 and c == -1:
continue
if r > 0:
grid[r-1, c] += 1
if r < len(data) - 1:
grid[r+1, c] += 1
if c > 0:
grid[r, c-1] += 1
if c < len(data) - 1:
grid[r, c+1] += 1
if r > 0 and c > 0:
grid[r-1, c-1] += 1
if r < len(data) - 1 and c < len(data) - 1:
grid[r+1, c+1] += 1
if c > 0 and r < len(data) - 1:
grid[r+1, c-1] += 1
if c < len(data) - 1 and r > 0:
grid[r-1, c+1] += 1
coords = list(flashed)
for row, col in coords:
grid[row, col] = 0
if len(coords) == len(data)**2:
mega_day = day + 1
day += 1
return mega_day
| 30.009524
| 59
| 0.371945
| 432
| 3,151
| 2.699074
| 0.143519
| 0.030875
| 0.06175
| 0.072041
| 0.802744
| 0.802744
| 0.802744
| 0.802744
| 0.802744
| 0.802744
| 0
| 0.065574
| 0.496668
| 3,151
| 104
| 60
| 30.298077
| 0.669609
| 0.029514
| 0
| 0.840909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022727
| false
| 0
| 0.011364
| 0
| 0.056818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fe1137000165f97fe3fc2b9e3b0e8d649383091d
| 126
|
py
|
Python
|
heritageclues/models.py
|
myrjola/hackforclues
|
7eddb63236421c634e1ade28ebce14ba99aa4d76
|
[
"MIT"
] | null | null | null |
heritageclues/models.py
|
myrjola/hackforclues
|
7eddb63236421c634e1ade28ebce14ba99aa4d76
|
[
"MIT"
] | null | null | null |
heritageclues/models.py
|
myrjola/hackforclues
|
7eddb63236421c634e1ade28ebce14ba99aa4d76
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.postgres.fields import JSONField
from django.contrib.auth.models import User
| 31.5
| 52
| 0.849206
| 19
| 126
| 5.631579
| 0.578947
| 0.280374
| 0.317757
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 126
| 3
| 53
| 42
| 0.938596
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
a3ef2a9dbed151cc2880c04f5ef86b5811b884e7
| 14,650
|
py
|
Python
|
webapp/tests/test_readers_util.py
|
TimWhalen/graphite-web
|
e150af45e01d01141a8767ec0597e218105b9914
|
[
"Apache-2.0"
] | 1
|
2021-01-16T20:10:45.000Z
|
2021-01-16T20:10:45.000Z
|
webapp/tests/test_readers_util.py
|
TimWhalen/graphite-web
|
e150af45e01d01141a8767ec0597e218105b9914
|
[
"Apache-2.0"
] | null | null | null |
webapp/tests/test_readers_util.py
|
TimWhalen/graphite-web
|
e150af45e01d01141a8767ec0597e218105b9914
|
[
"Apache-2.0"
] | null | null | null |
from .base import TestCase
from graphite.readers import merge_with_cache
from graphite.wsgi import application # NOQA makes sure we have a working WSGI app
from six.moves import range
class MergeWithCacheTests(TestCase):
    """Tests for graphite.readers.merge_with_cache.

    Covers each supported consolidation function, the step/raw_step
    combinations, cached points beyond the requested window, and cached
    points from a previous window. The repeated fixture (a 2-hour window
    at 1-minute resolution where the reader supplies the first half and
    the cache supplies one point per second for the second half) is
    factored into private helpers.
    """
    maxDiff = None

    # Shared fixture parameters: Mon Jun 13 19:01:00 UTC 2016,
    # a 2-hour window, sampled at 1-minute resolution.
    START = 1465844460
    WINDOW_SIZE = 7200
    STEP = 60

    def _reader_values(self):
        """Simulated DB values: data for the first half of the window,
        None for the second half."""
        values = list(range(0, self.WINDOW_SIZE // 2, self.STEP))
        for _ in range(0, self.WINDOW_SIZE // 2, self.STEP):
            values.append(None)
        return values

    def _cached_datapoints(self, value, start, end):
        """Simulated Carbon cache: one (timestamp, value) pair per second
        over [start, end)."""
        return [(i, value) for i in range(start, end, 1)]

    def _expected_values(self, fill):
        """Expected merge result: the reader half unchanged, the cached
        half consolidated to `fill` in every bucket."""
        expected = list(range(0, self.WINDOW_SIZE // 2, self.STEP))
        for _ in range(0, self.WINDOW_SIZE // 2, self.STEP):
            expected.append(fill)
        return expected

    def _merge_second_half(self, cache_value, expected_fill, **kwargs):
        """Merge cached points covering the second half of the window and
        assert every 60s bucket consolidates to `expected_fill`."""
        values = merge_with_cache(
            cached_datapoints=self._cached_datapoints(
                cache_value,
                self.START + self.WINDOW_SIZE // 2,
                self.START + self.WINDOW_SIZE),
            start=self.START,
            step=self.STEP,
            values=self._reader_values(),
            **kwargs
        )
        self.assertEqual(self._expected_values(expected_fill), values)

    def test_merge_with_cache_with_different_step_no_data(self):
        # All cached points are None: nothing to consolidate.
        self._merge_second_half(None, None, func='sum')

    def test_merge_with_cache_with_different_step_sum(self):
        # 60 one-second points of value 1 sum to 60 per 60s bucket.
        self._merge_second_half(1, 60, func='sum', raw_step=1)

    def test_merge_with_cache_with_different_step_sum_no_raw_step(self):
        self._merge_second_half(1, 60, func='sum')

    def test_merge_with_cache_with_different_step_sum_same_raw_step(self):
        self._merge_second_half(1, 60, func='sum')

    def test_merge_with_cache_with_different_step_sum_and_raw_step(self):
        # raw_step=30 means each cached point covers 30s, so only two
        # points land in each 60s bucket.
        self._merge_second_half(1, 2, func='sum', raw_step=30)

    def test_merge_with_cache_with_different_step_average(self):
        self._merge_second_half(1, 1, func='average')

    def test_merge_with_cache_with_different_step_max(self):
        self._merge_second_half(1, 1, func='max')

    def test_merge_with_cache_with_different_step_min(self):
        self._merge_second_half(1, 1, func='min')

    def test_merge_with_cache_with_different_step_last(self):
        self._merge_second_half(1, 1, func='last')

    def test_merge_with_cache_with_different_step_bad(self):
        # An unknown consolidation function must raise.
        with self.assertRaisesRegexp(
                Exception, "Invalid consolidation function: 'bad_function'"):
            merge_with_cache(
                cached_datapoints=self._cached_datapoints(
                    1,
                    self.START + self.WINDOW_SIZE // 2,
                    self.START + self.WINDOW_SIZE),
                start=self.START,
                step=self.STEP,
                values=self._reader_values(),
                func='bad_function'
            )

    # In merge_with_cache, if the `values[i] = value` fails, then
    # the try block catches the exception and passes. This tests
    # that case.
    def test_merge_with_cache_beyond_max_range(self):
        # Cached points lie entirely after the requested window, so the
        # merged values come back unchanged.
        values = merge_with_cache(
            cached_datapoints=self._cached_datapoints(
                None,
                self.START + self.WINDOW_SIZE,
                self.START + self.WINDOW_SIZE * 2),
            start=self.START,
            step=self.STEP,
            values=self._reader_values(),
            func='sum'
        )
        self.assertEqual(self._expected_values(None), values)

    def test_merge_with_cache_when_previous_window_in_cache(self):
        start = 1465844460  # (Mon Jun 13 19:01:00 UTC 2016)
        window_size = 3600  # (1 hour)
        step = 60           # (1 minute)
        # Simulated DB data: no datapoints for the given time window.
        values = self._create_none_window(step)
        # Simulated cache: datapoints only from the previous window.
        prev_window_start = start - window_size
        prev_window_end = prev_window_start + window_size
        cache_results = [
            (i, 1) for i in range(prev_window_start, prev_window_end, step)
        ]
        values = merge_with_cache(
            cached_datapoints=cache_results,
            start=start,
            step=step,
            values=values
        )
        # The merged result should be a None window because:
        # - db results for the window are None
        # - cache does not contain relevant points
        self.assertEqual(self._create_none_window(step), values)

    @staticmethod
    def _create_none_window(points_per_window):
        return [None for _ in range(0, points_per_window)]
| 35.906863
| 98
| 0.59843
| 1,925
| 14,650
| 4.395325
| 0.068571
| 0.092188
| 0.068904
| 0.079423
| 0.889375
| 0.879211
| 0.879211
| 0.879211
| 0.874719
| 0.874719
| 0
| 0.049272
| 0.319795
| 14,650
| 407
| 99
| 35.995086
| 0.799799
| 0.264437
| 0
| 0.804688
| 0
| 0
| 0.008723
| 0
| 0
| 0
| 0
| 0
| 0.046875
| 1
| 0.050781
| false
| 0
| 0.015625
| 0.003906
| 0.078125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
43434a36cdad47635da46c933ca6ca21f1865f26
| 23,167
|
py
|
Python
|
templatetags/common_templatetags.py
|
IBM/omnia
|
588f380e04c697ca9d5ed84822c14b37dad92d27
|
[
"Apache-2.0"
] | 1
|
2021-11-25T16:07:38.000Z
|
2021-11-25T16:07:38.000Z
|
templatetags/common_templatetags.py
|
IBM/omnia
|
588f380e04c697ca9d5ed84822c14b37dad92d27
|
[
"Apache-2.0"
] | null | null | null |
templatetags/common_templatetags.py
|
IBM/omnia
|
588f380e04c697ca9d5ed84822c14b37dad92d27
|
[
"Apache-2.0"
] | null | null | null |
import calendar
import json
import re
import math
from django import template
from django.db.models import Count, Q
from math import log, floor
from research.models import Artifact, BannerNotification
from research.helpers import hasEditorAccess
register = template.Library()
@register.inclusion_tag("partials/banner_notification.html")
def bannerNotification():
    """
    Render every currently-active banner at the top of the page,
    using the 'banner_notification.html' template.
    """
    active_banners = BannerNotification.objects.filter(active=True)
    return {"banners": active_banners}
@register.filter
def noprotocol(fullUrl):
    """
    Strips protocol off URL for nice display/hotlink text.
    Example: https://www.someDomain.com/some/path/here/
    Return: {string} URL with no protocol (ex: www.someDomain.com/some/path/here/)
    """
    # Drop any http:// or https:// prefix, then any single trailing slash.
    without_protocol = re.sub(r"https?://", "", fullUrl)
    return re.sub(r"/$", "", without_protocol)
@register.filter
def replace_underscore(string):
    """
    Turns every underscore in the value into a space.
    Return: {string}
    """
    # Splitting on '_' and rejoining with ' ' is equivalent to
    # string.replace('_', ' ') for any input.
    return ' '.join(string.split('_'))
@register.filter
def split(string, sep=','):
    """
    Template usage of split().
    Example usage: {{ value|split:',' }}
    Return: {array} Array of strings separated by the given separator.
    """
    pieces = string.split(sep)
    return pieces
@register.filter
def toJson(jsonString):
    """
    Parses a JSON (or array) string into Python objects so templates
    can loop over it.
    Example usage: {{ value|toJson }}
    Return: {json}
    """
    parsed = json.loads(jsonString)
    return parsed
@register.filter()
def formatMinutes(m):
    """
    Format a duration given in (possibly fractional) minutes as M:SS.
    Example: 1.5 -> "1:30"
    Return: {string}
    """
    # divmod replaces the original's three redundant floor computations;
    # stray C-style semicolons removed.
    total_seconds = math.floor(m * 60)
    mins, secs = divmod(total_seconds, 60)
    return "%d:%02d" % (mins, secs)
##
## Global template HTML helpers for site consistency and easy redesigns.
##
@register.simple_tag(takes_context=True)
def getTemplateHelpers(context):
    """
    Global template HTML helpers for site consistency and easy redesigns.

    Returns a dict with two top-level keys:
      'classes' -- shared CSS utility-class strings (Tachyons-style plus
                   project 'custom-*'/'bo-*' classes) for buttons, tabs,
                   tables, nav items, etc.
      'html'    -- reusable HTML snippets: a horizontal rule, inline SVG
                   icons, and attribute strings for datatable widgets.

    The 'context' parameter is required by takes_context=True but is not
    read in this implementation.
    """
    # Shared fragments composed into several of the class strings below.
    horizontalSpace = 'ph3 ph4-ns'
    rounded = 'br2'
    commonButton = 'bw0 dib pointer ph3 pv2 custom-animate-all border-box lh-copy custom-standard-button ' + rounded
    smallButton = 'bw0 dib pointer ph3 pv2 custom-animate-all border-box ' + rounded
    tab = rounded + ' custom-tab relative w-auto bg-animate db pointer ph4 pv3 mb0 bw0 fw5 bg-near-white hover-bg-light-gray'
    # Icons from Carbon repo.  Each value is a complete inline <svg>
    # element meant to be injected into templates verbatim.
    icons = {
        'add': '<svg xmlns="http://www.w3.org/2000/svg" viewBox="6 6 21 21" class="icon add"><defs><style>.cls-1{fill:none;}</style></defs><polygon points="17 15 17 7 15 7 15 15 7 15 7 17 15 17 15 25 17 25 17 17 25 17 25 15 17 15"/><rect class="cls-1" width="32" height="32"/></svg>',
        'archive': '<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 32 32" class="icon archive"><defs><style>.cls-1{fill:none;}</style></defs><rect x="14" y="19" width="4" height="2"/><path d="M6,2V28a2,2,0,0,0,2,2H24a2,2,0,0,0,2-2V2ZM24,28H8V16H24Zm0-14H8V10H24ZM8,8V4H24V8Z"/><rect class="cls-1" width="32" height="32"/></svg>',
        'arrowDown': '<svg xmlns="http://www.w3.org/2000/svg" width="32" height="32" viewBox="0 0 32 32" class="icon"><defs><style>.cls-1 {fill: none;}</style></defs><polygon points="24.59 16.59 17 24.17 17 4 15 4 15 24.17 7.41 16.59 6 18 16 28 26 18 24.59 16.59"/><rect class="cls-1" width="32" height="32"/></svg>',
        'chat': '<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 32 32"><defs><style>.cls-1{fill:none;}</style></defs><title>chat</title><path d="M17.74,30,16,29l4-7h6a2,2,0,0,0,2-2V8a2,2,0,0,0-2-2H6A2,2,0,0,0,4,8V20a2,2,0,0,0,2,2h9v2H6a4,4,0,0,1-4-4V8A4,4,0,0,1,6,4H26a4,4,0,0,1,4,4V20a4,4,0,0,1-4,4H21.16Z" transform="translate(0 0)"/><rect x="8" y="10" width="16" height="2"/><rect x="8" y="16" width="10" height="2"/><rect class="cls-1" width="32" height="32"/></svg>',
        'checkmark': '<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 32 32" class="icon checkmark"><defs><style>.cls-1{fill:none;}</style></defs><path d="M16,2A14,14,0,1,0,30,16,14,14,0,0,0,16,2Zm0,26A12,12,0,1,1,28,16,12,12,0,0,1,16,28Z"/><polygon points="14 21.5 9 16.54 10.59 14.97 14 18.35 21.41 11 23 12.58 14 21.5"/><rect class="cls-1" width="32" height="32"/></svg>',
        'chevronForward': '<svg xmlns="http://www.w3.org/2000/svg" viewBox="8 0 18 24" class="icon chevron-forward"><g data-name="Layer 2"><g data-name="arrow-ios-forward"><rect width="24" height="24" transform="rotate(-90 12 12)" opacity="0"/><path d="M10 19a1 1 0 0 1-.64-.23 1 1 0 0 1-.13-1.41L13.71 12 9.39 6.63a1 1 0 0 1 .15-1.41 1 1 0 0 1 1.46.15l4.83 6a1 1 0 0 1 0 1.27l-5 6A1 1 0 0 1 10 19z"/></g></g></svg>',
        'close': '<svg xmlns="http://www.w3.org/2000/svg" viewBox="2 2 20 20" class="icon close"><g data-name="Layer 2"><g data-name="close"><rect width="24" height="24" transform="rotate(180 12 12)" opacity="0"/><path d="M13.41 12l4.3-4.29a1 1 0 1 0-1.42-1.42L12 10.59l-4.29-4.3a1 1 0 0 0-1.42 1.42l4.3 4.29-4.3 4.29a1 1 0 0 0 0 1.42 1 1 0 0 0 1.42 0l4.29-4.3 4.29 4.3a1 1 0 0 0 1.42 0 1 1 0 0 0 0-1.42z"/></g></g></svg>',
        'copy': '<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 32 32" class="icon copy"><defs><style>.cls-1{fill:none;}</style></defs><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" /><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" /><rect class="cls-1" width="32" height="32"/></svg>',
        'csv': '<svg version="1.1" xmlns="http://www.w3.org/2000/svg" class="icon csv" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" viewBox="0 0 512 512" style="enable-background:new 0 0 512 512;" xml:space="preserve"><g><g><g><path d="M443.536,141.584L312.208,4.912C309.184,1.776,305.024,0,300.672,0H80c-8.848,0-16,7.168-16,16v480c0,8.832,7.152,16,16,16h352c8.848,0,16-7.168,16-16V152.672C448,148.544,446.4,144.56,443.536,141.584z M304,42.56L401.488,144H304V42.56z M416,480H96V32h176v128c0,8.832,7.152,16,16,16h128V480z"/><path d="M158.208,333.2c2.96-3.136,6.224-5.328,10.016-6.736c6.48-2.4,13.376-2.784,20.944-1.536c2.784,0.464,6.304,1.44,10.4,2.928c2.656,0.928,5.584,0.432,7.76-1.328c2.176-1.776,3.248-4.56,2.864-7.328c-0.272-1.776-0.416-3.456-0.48-5.072c-0.16-3.6-2.688-6.656-6.208-7.456c-2.944-0.672-5.296-1.168-7.04-1.456c-1.776-0.304-4.096-0.576-6.896-0.832c-11.456-1.008-20.224,0.048-28.992,3.296c-6.896,2.544-12.896,6.512-17.856,11.792c-4.912,5.248-8.656,11.84-11.12,19.568c-2.4,7.36-3.6,15.12-3.6,23.04c0,7.184,1.04,14.128,3.104,20.64c2.176,6.896,5.664,12.96,10.336,18c4.688,5.072,10.384,8.96,16.944,11.536c6.368,2.48,13.152,3.744,20.176,3.744c3.216,0,6.88-0.24,10.96-0.736c4.064-0.496,8.384-1.36,13.008-2.608c3.216-0.864,5.584-3.648,5.888-6.976c0.176-1.792,0.368-3.328,0.656-4.672c0.624-2.832-0.336-5.76-2.512-7.68c-2.176-1.92-5.216-2.496-7.936-1.568c-13.36,4.656-24.08,5.232-32.688,1.68c-3.632-1.488-6.656-3.616-9.28-6.496c-2.592-2.832-4.496-6.368-5.84-10.784c-1.424-4.784-2.16-9.872-2.16-15.136c0-5.408,0.88-10.72,2.608-15.808C152.928,340.352,155.184,336.416,158.208,333.2z"/><path d="M277.12,372.768c-1.072-2.08-2.432-4.144-4.16-6.272c-1.536-1.904-3.136-3.664-5.552-5.984l-6.944-6.656c-3.504-3.344-6.32-6.096-8.4-8.224c-2.096-2.112-3.888-4-5.392-5.632c-1.248-1.36-2.256-2.608-3.008-3.728c-0.496-0.752-0.848-1.44-1.072-2.224c-0.176-0.544-0.256-1.136-0.256-1.856c0-1.328,0.336-2.528,1.072-3.776c0.736-1.248,1.728-2.192,3.152-2.992c2.416-1.344,7.104-1.552,12.08-0.656c2.24,0.4,5.008,1.2,8.336,2.336c2.656,0.928,5.584,0.368,7.744-1.424c2.16-1.808,3.2-4.608,2.768-7.376c-0.272-1.68-0.432-3.232-0.464-4.64c-0.064-3.744-2.72-6.944-6.384-7.68c-16.736-3.456-26.016-2.352-33.76,1.68c-4.992,2.576-8.832,6.176-11.424,10.704c-2.512,4.384-3.792,9.184-3.792,14.288c0,2.096,0.208,4.128,0.592,6.112c0.416,2.064,0.992,3.872,1.728,5.632c0.688,1.568,1.664,3.36,3.072,5.44c1.216,1.744,2.704,3.648,4.544,5.712c1.712,1.92,3.488,3.76,5.488,5.68l4.944,4.512c5.424,4.976,9.216,8.624,11.344,10.992c2.016,2.256,3.552,4.096,4.608,5.504c0.672,0.928,1.152,1.776,1.408,2.496c0.24,0.672,0.336,1.376,0.336,2.16c0,1.84-0.4,3.376-1.232,4.704c-0.8,1.248-1.872,2.144-3.424,2.832c-5.888,2.592-14.608,1.264-24.576-3.936c-2.592-1.36-5.744-1.2-8.192,0.464c-2.432,1.648-3.76,4.496-3.472,7.424c0.208,2.016,0.32,3.792,0.32,5.344c0,3.36,2.112,6.368,5.28,7.52c5.344,1.936,9.6,3.168,13.024,3.792c3.408,0.656,6.848,0.96,10.384,0.96c5.824,0,11.248-1.056,16.112-3.152c5.376-2.336,9.52-5.872,12.336-10.512c2.752-4.512,4.144-9.696,4.144-15.424c0-2.368-0.272-4.784-0.816-7.312C279.072,377.248,278.24,374.944,277.12,372.768z"/><path d="M287.952,315.952l23.024,69.76l6.96,23.04c1.008,3.52,4.304,5.84,7.92,5.808l3.328-0.064l3.264,0.064c0.08,0,0.144,0,0.224,0c3.216,0,6.144-1.936,7.376-4.912c3.392-8.08,7.504-17.248,12.304-27.456l16.304-34.896c5.968-12.848,10.8-22.944,14.496-30.24c1.28-2.528,1.12-5.552-0.4-7.936c-1.52-2.384-4.128-3.792-7.024-3.68l-2.64,0.048l-2.624-0.048c-3.392-0.256-6.416,1.856-7.68,4.944c-1.472,3.552-2.656,6.368-3.616,8.496l-28.336,61.616l-18.336-56.656l-3.632-12.624c-1.008-3.52-4.48-6.24-7.984-5.776l-2.496,0.048l-2.528-0.048c-2.624-0.336-5.104,1.088-6.672,3.184C287.584,310.72,287.12,313.456,287.952,315.952z"/></g></g></g></svg>',
        'edit': '<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 32 32" class="icon edit"><defs><style>.cls-1{fill:none;}</style></defs><rect x="2" y="27" width="28" height="2"/><path d="M25.41,9a2,2,0,0,0,0-2.83L21.83,2.59a2,2,0,0,0-2.83,0l-15,15V24h6.41Zm-5-5L24,7.59l-3,3L17.41,7ZM6,22V18.41l10-10L19.59,12l-10,10Z"/><rect class="cls-1" width="32" height="32"/></svg>',
        'flag': '<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 32 32" class="icon"><defs><style>.cls-1{fill:none;}</style></defs><path d="M6,30H4V2H28l-5.8,9L28,20H6ZM6,18H24.33L19.8,11l4.53-7H6Z"/><rect class="cls-1" width="32" height="32"/></svg>',
        'email': '<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" class="icon email"><g data-name="Layer 2"><g data-name="email"><rect width="24" height="24" opacity="0"/><path d="M19 4H5a3 3 0 0 0-3 3v10a3 3 0 0 0 3 3h14a3 3 0 0 0 3-3V7a3 3 0 0 0-3-3zm-.67 2L12 10.75 5.67 6zM19 18H5a1 1 0 0 1-1-1V7.25l7.4 5.55a1 1 0 0 0 .6.2 1 1 0 0 0 .6-.2L20 7.25V17a1 1 0 0 1-1 1z"/></g></g></svg>',
        'sad': '<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 32 32" class="icon sad"><defs><style>.cls-1{fill:none;}</style></defs><path d="M16,2A14,14,0,1,0,30,16,14,14,0,0,0,16,2Zm0,26A12,12,0,1,1,28,16,12,12,0,0,1,16,28Z" transform="translate(0)"/><path d="M11.5,11A2.5,2.5,0,1,0,14,13.5,2.5,2.5,0,0,0,11.5,11Z" transform="translate(0)"/><path d="M20.5,11A2.5,2.5,0,1,0,23,13.5,2.5,2.5,0,0,0,20.5,11Z" transform="translate(0)"/><path d="M16,19a8,8,0,0,0-6.85,3.89l1.71,1a6,6,0,0,1,10.28,0l1.71-1A8,8,0,0,0,16,19Z" transform="translate(0)"/><rect class="cls-1" width="32" height="32"/></svg>',
        'grid': '<svg xmlns="http://www.w3.org/2000/svg" viewBox="2 -3 30 30" class="icon"><g data-name="Layer 2"><g data-name="grid"><rect width="24" height="24" opacity="0"/><path d="M9 3H5a2 2 0 0 0-2 2v4a2 2 0 0 0 2 2h4a2 2 0 0 0 2-2V5a2 2 0 0 0-2-2zM5 9V5h4v4z"/><path d="M19 3h-4a2 2 0 0 0-2 2v4a2 2 0 0 0 2 2h4a2 2 0 0 0 2-2V5a2 2 0 0 0-2-2zm-4 6V5h4v4z"/><path d="M9 13H5a2 2 0 0 0-2 2v4a2 2 0 0 0 2 2h4a2 2 0 0 0 2-2v-4a2 2 0 0 0-2-2zm-4 6v-4h4v4z"/><path d="M19 13h-4a2 2 0 0 0-2 2v4a2 2 0 0 0 2 2h4a2 2 0 0 0 2-2v-4a2 2 0 0 0-2-2zm-4 6v-4h4v4z"/></g></g></svg>',
        'help': '<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 32 32" class="icon help"><defs><style>.cls-1{fill:none;}</style></defs><path d="M16,2A14,14,0,1,0,30,16,14,14,0,0,0,16,2Zm0,26A12,12,0,1,1,28,16,12,12,0,0,1,16,28Z"/><circle cx="16" cy="23.5" r="1.5"/><path d="M17,8H15.5A4.49,4.49,0,0,0,11,12.5V13h2v-.5A2.5,2.5,0,0,1,15.5,10H17a2.5,2.5,0,0,1,0,5H15v4.5h2V17a4.5,4.5,0,0,0,0-9Z"/><rect class="cls-1" width="32" height="32"/></svg>',
        'idea': '<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 32 32" class="icon idea"><defs><style>.cls-1{fill:none;}</style></defs><rect x="11" y="24" width="10" height="2"/><rect x="13" y="28" width="6" height="2"/><path d="M16,2A10,10,0,0,0,6,12a9.19,9.19,0,0,0,3.46,7.62c1,.93,1.54,1.46,1.54,2.38h2c0-1.84-1.11-2.87-2.19-3.86A7.2,7.2,0,0,1,8,12a8,8,0,0,1,16,0,7.2,7.2,0,0,1-2.82,6.14c-1.07,1-2.18,2-2.18,3.86h2c0-.92.53-1.45,1.54-2.39A9.18,9.18,0,0,0,26,12,10,10,0,0,0,16,2Z" transform="translate(0 0)"/><rect class="cls-1" width="32" height="32"/></svg>',
        'info': '<svg id="icon" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 32 32" class="icon info"><defs><style>.cls-1{fill:none;}</style></defs><polygon points="17 22 17 13 13 13 13 15 15 15 15 22 12 22 12 24 20 24 20 22 17 22"/><path d="M16,7a1.5,1.5,0,1,0,1.5,1.5A1.5,1.5,0,0,0,16,7Z"/><path d="M16,30A14,14,0,1,1,30,16,14,14,0,0,1,16,30ZM16,4A12,12,0,1,0,28,16,12,12,0,0,0,16,4Z"/><rect id="_Transparent_Rectangle_" data-name="<Transparent Rectangle>" class="cls-1" width="32" height="32"/></svg>',
        'list': '<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" class="icon"><g data-name="Layer 2"><g data-name="list"><rect width="24" height="24" transform="rotate(180 12 12)" opacity="0"/><circle cx="4" cy="7" r="1"/><circle cx="4" cy="12" r="1"/><circle cx="4" cy="17" r="1"/><rect x="7" y="11" width="14" height="2" rx=".94" ry=".94"/><rect x="7" y="16" width="14" height="2" rx=".94" ry=".94"/><rect x="7" y="6" width="14" height="2" rx=".94" ry=".94"/></g></g></svg>',
        'modal': '<svg xmlns="http://www.w3.org/2000/svg" class="icon modal" width="32" height="32" viewBox="0 0 32 32"><defs><style>.cls-1{fill:none;}</style></defs><path d="M28,4H10A2.0059,2.0059,0,0,0,8,6V20a2.0059,2.0059,0,0,0,2,2H28a2.0059,2.0059,0,0,0,2-2V6A2.0059,2.0059,0,0,0,28,4Zm0,16H10V6H28Z"/><path d="M18,26H4V16H6V14H4a2.0059,2.0059,0,0,0-2,2V26a2.0059,2.0059,0,0,0,2,2H18a2.0059,2.0059,0,0,0,2-2V24H18Z"/><rect class="cls-1" width="32" height="32"/></svg>',
        'newWindow': '<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" class="icon new-window"><g data-name="Layer 2"><g data-name="external-link"><rect width="24" height="24" opacity="0"/><path d="M20 11a1 1 0 0 0-1 1v6a1 1 0 0 1-1 1H6a1 1 0 0 1-1-1V6a1 1 0 0 1 1-1h6a1 1 0 0 0 0-2H6a3 3 0 0 0-3 3v12a3 3 0 0 0 3 3h12a3 3 0 0 0 3-3v-6a1 1 0 0 0-1-1z"/><path d="M16 5h1.58l-6.29 6.28a1 1 0 0 0 0 1.42 1 1 0 0 0 1.42 0L19 6.42V8a1 1 0 0 0 1 1 1 1 0 0 0 1-1V4a1 1 0 0 0-1-1h-4a1 1 0 0 0 0 2z"/></g></g></svg>',
        'poop': '<svg viewBox="0 0 24 24" class="icon" width="24" xmlns="http://www.w3.org/2000/svg"><path d="m19.581 24h-15.162c-2.437 0-4.419-1.982-4.419-4.419 0-1.88 1.201-3.508 2.896-4.131-.295-.568-.451-1.202-.451-1.86 0-2.101 1.606-3.833 3.656-4.032-.116-.295-.18-.613-.185-.937-.015-.975.355-1.894 1.039-2.589s1.599-1.077 2.574-1.077c1.166 0 2.114-.948 2.114-2.114v-2.091c0-.27.145-.519.38-.652.233-.133.522-.13.754.008l3.669 2.186c1.3.971 2.052 2.386 2.129 3.926.061 1.203-.296 2.37-.995 3.322 2.198.041 3.975 1.843 3.975 4.051 0 .658-.156 1.292-.451 1.86 1.695.622 2.896 2.25 2.896 4.13 0 2.437-1.982 4.419-4.419 4.419zm-13.084-12.962c-1.407 0-2.552 1.145-2.552 2.552 0 .686.27 1.33.761 1.813.21.208.278.52.176.796-.104.276-.359.467-.654.486-1.53.102-2.728 1.374-2.728 2.896 0 1.609 1.31 2.919 2.919 2.919h15.162c1.609 0 2.919-1.31 2.919-2.919 0-1.522-1.198-2.794-2.728-2.896-.295-.02-.551-.21-.654-.486s-.034-.588.176-.796c.491-.483.761-1.128.761-1.813 0-1.407-1.145-2.552-2.552-2.552h-1.968c-.355 0-.663-.25-.734-.599-.072-.349.111-.699.438-.84.255-.109.476-.25.657-.417.806-.742 1.236-1.796 1.182-2.891-.056-1.094-.589-2.099-1.465-2.757l-2.469-1.467v.772c0 1.993-1.621 3.614-3.614 3.614-.57 0-1.105.224-1.506.63s-.617.946-.608 1.517c.006.432.24.818.627 1.034.298.166.445.514.359.844s-.384.561-.726.561h-1.179z"/><path d="m8 15.485c-.827 0-1.5-.673-1.5-1.5s.673-1.5 1.5-1.5 1.5.673 1.5 1.5-.673 1.5-1.5 1.5zm0-1.501v.002l.75-.001z"/><path d="m16 15.485c-.827 0-1.5-.673-1.5-1.5s.673-1.5 1.5-1.5 1.5.673 1.5 1.5-.673 1.5-1.5 1.5zm0-1.501v.002l.75-.001z"/><path d="m12 22.006c-2.521 0-4.718-1.621-5.467-4.033-.07-.228-.028-.476.113-.668s.366-.305.604-.305h9.5c.238 0 .463.113.604.305.142.192.184.44.113.668-.749 2.412-2.946 4.033-5.467 4.033zm-3.594-3.506c.755 1.23 2.097 2.006 3.594 2.006s2.839-.775 3.594-2.006z"/></svg>',
        'sortup': '<svg xmlns="http://www.w3.org/2000/svg" x="0px" y="0px" class="icon" width="32px" height="32px" viewBox="0 0 32 32" style="enable-background:new 0 0 32 32;" xml:space="preserve"><style type="text/css">.st0{fill:none;}</style><polygon points="8,8 16,0 24,8 "/><rect id="_Transparent_Rectangle_" class="st0" width="32" height="32"/></svg>',
        'star': '<svg xmlns="http://www.w3.org/2000/svg" id="icon" viewBox="0 0 32 32" class="icon star"><defs><style>.cls-1{fill:none;}</style></defs><path d="M16,6.52l2.76,5.58.46,1,1,.15,6.16.89L22,18.44l-.75.73.18,1,1.05,6.13-5.51-2.89L16,23l-.93.49L9.56,26.34l1-6.13.18-1L10,18.44,5.58,14.09l6.16-.89,1-.15.46-1L16,6.52M16,2l-4.55,9.22L1.28,12.69l7.36,7.18L6.9,30,16,25.22,25.1,30,23.36,19.87l7.36-7.17L20.55,11.22Z"/><rect class="cls-1" width="32" height="32"/></svg>',
        'subtract': '<svg version="1.1" xmlns="http://www.w3.org/2000/svg" x="0px" y="0px" width="32px" height="32px" class="icon" viewBox="0 0 32 32" style="enable-background:new 0 0 32 32;" xml:space="preserve"><style type="text/css"> .st0{fill:none;}</style><rect x="8" y="15" width="16" height="2"/><rect class="st0" width="32" height="32"/></svg>',
        'table': '<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 32 32" class="icon table"><defs><style>.cls-1{fill:none;}</style></defs><path d="M27,3H5A2,2,0,0,0,3,5V27a2,2,0,0,0,2,2H27a2,2,0,0,0,2-2V5A2,2,0,0,0,27,3Zm0,2V9H5V5ZM17,11H27v7H17Zm-2,7H5V11H15ZM5,20H15v7H5Zm12,7V20H27v7Z"/><rect class="cls-1" width="32" height="32"/></svg>',
        'time': '<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 32 32" class="icon time"><defs><style>.cls-1{fill:none;}</style></defs><path d="M16,30A14,14,0,1,1,30,16,14,14,0,0,1,16,30ZM16,4A12,12,0,1,0,28,16,12,12,0,0,0,16,4Z"/><polygon points="20.59 22 15 16.41 15 7 17 7 17 15.58 22 20.59 20.59 22"/><rect class="cls-1" width="32" height="32"/></svg>',
        'trash': '<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 32 32" class="icon trash"><defs><style>.cls-1{fill:none;}</style></defs><rect x="12" y="12" width="2" height="12"/><rect x="18" y="12" width="2" height="12"/><path d="M4,6V8H6V28a2,2,0,0,0,2,2H24a2,2,0,0,0,2-2V8h2V6ZM8,28V8H24V28Z"/><rect x="12" y="2" width="8" height="2"/><rect class="cls-1" width="32" height="32"/></svg>',
        'unarchive': '<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 32 32" class="icon unarchive"><defs><style>.cls-1{fill:none;}</style></defs><path d="M25.7,9.3l-7-7A.91.91,0,0,0,18,2H8A2,2,0,0,0,6,4V28a2,2,0,0,0,2,2H24a2,2,0,0,0,2-2V10A.91.91,0,0,0,25.7,9.3ZM18,4.4,23.6,10H18ZM24,28H8V4h8v6a2,2,0,0,0,2,2h6Z"/><polygon points="14 22.18 11.41 19.59 10 21 14 25 22 17 20.59 15.59 14 22.18"/><rect class="cls-1" width="32" height="32"/></svg>',
        'userRole': '<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 32 32" class="icon userrole"><defs><style>.cls-1{fill:none;}</style></defs><polygon points="28.07 21 22 15 28.07 9 29.5 10.41 24.86 15 29.5 19.59 28.07 21"/><path d="M22,30H20V25a5,5,0,0,0-5-5H9a5,5,0,0,0-5,5v5H2V25a7,7,0,0,1,7-7h6a7,7,0,0,1,7,7Z"/><path d="M12,4A5,5,0,1,1,7,9a5,5,0,0,1,5-5m0-2a7,7,0,1,0,7,7A7,7,0,0,0,12,2Z"/><rect id="_Transparent_Rectangle_" class="cls-1" width="32" height="32"/></svg>',
        'warn': '<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 32 32" class="icon warn"><defs><style>.cls-1{fill: none;}</style></defs><path d="M16,23a1.5,1.5,0,1,0,1.5,1.5A1.5,1.5,0,0,0,16,23Z"/> <rect x="15" y="12" width="2" height="9"/><path d="M29,30H3a1,1,0,0,1-.8872-1.4614l13-25a1,1,0,0,1,1.7744,0l13,25A1,1,0,0,1,29,30ZM4.6507,28H27.3493l.002-.0033L16.002,6.1714h-.004L4.6487,27.9967Z"/><rect class="cls-1" width="32" height="32"/></svg>'
    }
    return {
        # CSS utility-class strings shared across templates.
        'classes': {
            'button': commonButton,
            'smallButton': smallButton,
            'bluePriButton': 'bg-blue-70 hover-bg-dark-blue white hover-white',
            'blueSecButton': 'bg-near-white hover-bg-dark-blue dark-blue hover-white',
            'blueTertiaryButton': 'bg-white hover-bg-dark-blue blue-70 hover-white nounderline',
            'greenPriButton': 'bg-green hover-bg-dark-green white hover-white',
            'redPriButton': 'bg-red hover-bg-dark-red white hover-white',
            'redSecButton': 'bg-near-white hover-bg-red dark-red hover-white',
            'disabledButton': 'bg-black-10 black-40',
            'bulletlist': 'bo-bullet-list',
            'grid': horizontalSpace,
            'horizontalSpace': horizontalSpace,
            'hasIconFlexCenter': 'inline-flex items-center underline-hover',
            'imageBorder': 'ba b--black-20',
            'leftnavItem': 'custom-animate-all custom-leftnav-item db hover-bg-near-white pa2 pl3 relative underline-hover textcolor f6 fw5',
            'leftnavSubnavItem': 'custom-animate-all custom-leftnav-item db hover-bg-near-white pa2 pl4 relative underline-hover textcolor f6',
            'link': 'custom-animate-all link linkcolor',
            'menunavLink': 'custom-animate-all underline-hover custom-menunav-item db ph3 white hover-light-yellow',
            'menunavText': 'custom-menunav-item db ph3 white',
            'navItem': 'custom-animate-all underline-hover pa3 link f6 f5-ns db relative hover-dark-blue textcolor',
            'overlayContent': 'w-90 bg-white pa4 br2',
            'overlayClose': 'mt2 common-modal-close pointer h2 ba br2 ph2 border-box b--blue-70 blue-70 bg-white hover-bg-dark-blue hover-white custom-animate-all',
            'pageTitleSecondary': 'fw4',
            'rounded': rounded,
            'spinner': 'bo-spinner ba br-100',
            'tab': tab,
            'tableListCell': 'pv3 pr3 bb b--black-20',
            'tableListCellSmall': 'pv2 pr2 bb b--black-20 f6',
            'tableListCell_bt': 'pv2 bt b--black-20',
            'tag': 'common-tag inline-flex items-center ba br2 custom-border-color hover-b--dark-blue hover-bg-dark-blue textcolor hover-white pv1 ph2 mr2 mb2 f6 lh-title bg-near-white',
            'textTag': f'dib f6 {rounded} pv1 ph2',
            'tooltipCue': 'bb b--black-20 b--dashed pointer normal bt-0 br-0 bl-0',
            'yellowMessage': 'ph2 bg-light-yellow bo-fadeout br2',
            'websiteHeading': 'f3 fw4 mb3',
        },
        # Reusable HTML snippets.
        'html': {
            'hr': '<div class="' + horizontalSpace + ' w-100 mv5"><div class="bb b--silver"></div></div>',
            'icons': icons,
            # Attribute strings for the 'datatable' widget markup.
            'tableWidget': {
                'sortOnly': 'data-widget="datatable" data-fixed-header="true" data-paging="false" data-searching="false" data-info="false" class="w-100 hover stripe collapse display" width="100%"',
                'fullFeatures': 'data-widget="datatable" data-fixed-header="true" data-length-change="false" data-page-length="100" class="w-100 hover stripe collapse display" width="100%" data-buttons=\'["excel"]\' data-dom="lBifrtip"',
            },
        }
    }
@register.filter
def alertTypeIcon(type):
    """
    Takes alert type and returns span with icon to represent it.
    Unknown alert types yield an empty string.
    """
    # Dispatch table replacing the original if/elif chain: each alert
    # type maps to its span markup (reproduced verbatim) plus the key of
    # the icon to splice into it.
    span_by_type = {
        'Great': ('<span class="yellow hasiconNoTop">{}</span>', 'star'),
        'Good': ('<span class="green hasiconNoTop">{}</span>', 'checkmark'),
        'Warning': ('<span class="orange hasiconNoTop">{}</span>', 'warn'),
        'Bad': ('<span class="red hasiconNoTop">{}</span>', 'sad'),
        'Poop': ('<span class="brown hasiconNoTop">{}</span>', 'poop'),
        'Info': ('<span class="blue hasiconNoTop">{}</span>', 'info'),
    }
    entry = span_by_type.get(type)
    if entry is None:
        return ''
    span_template, icon_key = entry
    helpers = getTemplateHelpers({})
    return span_template.format(helpers['html']['icons'][icon_key])
| 119.417526
| 3,721
| 0.661329
| 5,204
| 23,167
| 2.940815
| 0.205035
| 0.033063
| 0.019015
| 0.010193
| 0.400484
| 0.358468
| 0.313513
| 0.29672
| 0.273001
| 0.222295
| 0
| 0.309597
| 0.084689
| 23,167
| 193
| 3,722
| 120.036269
| 0.41212
| 0.032892
| 0
| 0.037037
| 0
| 0.296296
| 0.890799
| 0.443609
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059259
| false
| 0
| 0.066667
| 0
| 0.185185
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4a30540b21c24e0d7a0bdf376d245bc80d9feb0f
| 9,198
|
py
|
Python
|
Contents/Libraries/Shared/subliminal_patch/providers/utils.py
|
Acidburn0zzz/Sub-Zero.bundle
|
eb3a0d52fde281773ba5109fad9801ede9c938ba
|
[
"MIT"
] | 1
|
2018-02-01T18:00:59.000Z
|
2018-02-01T18:00:59.000Z
|
Contents/Libraries/Shared/subliminal_patch/providers/utils.py
|
Acidburn0zzz/Sub-Zero.bundle
|
eb3a0d52fde281773ba5109fad9801ede9c938ba
|
[
"MIT"
] | null | null | null |
Contents/Libraries/Shared/subliminal_patch/providers/utils.py
|
Acidburn0zzz/Sub-Zero.bundle
|
eb3a0d52fde281773ba5109fad9801ede9c938ba
|
[
"MIT"
] | null | null | null |
# Pool of real-world browser User-Agent strings (Chrome, Firefox, Safari,
# Edge, IE and Opera across Windows/macOS/Linux/iOS, circa 2017).
# NOTE(review): presumably a caller samples one of these to vary the
# User-Agent header on outgoing provider requests — confirm at call sites.
FIRST_THOUSAND_OR_SO_USER_AGENTS = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:56.0) Gecko/20100101 Firefox/56.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Safari/604.1.38",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Safari/604.1.38",
    "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:56.0) Gecko/20100101 Firefox/56.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:56.0) Gecko/20100101 Firefox/56.0",
    "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:56.0) Gecko/20100101 Firefox/56.0",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36 Edge/15.15063",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/604.3.5 (KHTML, like Gecko) Version/11.0.1 Safari/604.3.5",
    "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.89 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/604.3.5 (KHTML, like Gecko) Version/11.0.1 Safari/604.3.5",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36",
    "Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:56.0) Gecko/20100101 Firefox/56.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:56.0) Gecko/20100101 Firefox/56.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:56.0) Gecko/20100101 Firefox/56.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:56.0) Gecko/20100101 Firefox/56.0",
    "Mozilla/5.0 (Windows NT 6.1; rv:56.0) Gecko/20100101 Firefox/56.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Safari/604.1.38",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0",
    "Mozilla/5.0 (X11; Linux x86_64; rv:56.0) Gecko/20100101 Firefox/56.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:56.0) Gecko/20100101 Firefox/56.0",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36 OPR/48.0.2685.52",
    "Mozilla/5.0 (iPad; CPU OS 11_0_3 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A432 Safari/604.1",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36 Edge/14.14393",
    "Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.89 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/61.0.3163.100 Chrome/61.0.3163.100 Safari/537.36",
    "Mozilla/5.0 (X11; Linux x86_64; rv:57.0) Gecko/20100101 Firefox/57.0",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36 OPR/48.0.2685.39",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) AppleWebKit/600.5.17 (KHTML, like Gecko) Version/8.0.5 Safari/600.5.17",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/604.3.5 (KHTML, like Gecko) Version/11.0.1 Safari/604.3.5",
    "Mozilla/5.0 (Windows NT 6.1; rv:52.0) Gecko/20100101 Firefox/52.0",
    "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.89 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 Edge/16.16299",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:50.0) Gecko/20100101 Firefox/50.0",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:55.0) Gecko/20100101 Firefox/55.0",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; Touch; rv:11.0) like Gecko",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.3; WOW64; rv:56.0) Gecko/20100101 Firefox/56.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0; Trident/5.0)",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; Trident/5.0)",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/603.2.4 (KHTML, like Gecko) Version/10.1.1 Safari/603.2.4",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:56.0) Gecko/20100101 Firefox/56.0",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:52.0) Gecko/20100101 Firefox/52.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
]
| 106.953488
| 142
| 0.696891
| 1,798
| 9,198
| 3.526696
| 0.055617
| 0.06939
| 0.117805
| 0.145718
| 0.944961
| 0.944961
| 0.939284
| 0.93613
| 0.935341
| 0.929191
| 0
| 0.253147
| 0.136334
| 9,198
| 85
| 143
| 108.211765
| 0.545065
| 0
| 0
| 0
| 0
| 0.976471
| 0.92357
| 0.002392
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
4a359cf4d79602d9016731c173d35e56cf5bee79
| 34,339
|
py
|
Python
|
src/Ftp_downupload_play.py
|
JeremyHash/LuatPyComUtils
|
8dca96a85752f74a4d008553b9c94b87ac7a99e3
|
[
"MIT"
] | null | null | null |
src/Ftp_downupload_play.py
|
JeremyHash/LuatPyComUtils
|
8dca96a85752f74a4d008553b9c94b87ac7a99e3
|
[
"MIT"
] | null | null | null |
src/Ftp_downupload_play.py
|
JeremyHash/LuatPyComUtils
|
8dca96a85752f74a4d008553b9c94b87ac7a99e3
|
[
"MIT"
] | null | null | null |
import traceback
from utils import Logger
import sys
import re
import binascii
# FTP测试
class Ftp_play_upload:
log = Logger.Logger('./log/FTP.txt', level='debug')
uploadfile = ''
downloadfile = ''
lists = [0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0
x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0
x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0
x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0
x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xC0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xF8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFE,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0x80,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xC0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xE0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x1F,0xFF,0xCF,0xF1,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE,0xFF,0xFF,0x60,0x7C,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE,0xFF,0xFF,0xFF,0xFF,0xF0,0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0
x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x7F,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xE0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xF8,0x1F,0xE0,0x00,0x00,0x00,0x00,0x00,0xC0,0x7C,0x03,0xFF,0xFF,0xFF,0xFF,0xF8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xFF,0x80,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x7F,0xFF,0xFF,0xF0,0x3F,0xC0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x1F,0xFF,0xFF,0xFE,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x1F,0xFF,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x3F,0x3F,0xFC,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0xE0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0
x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x10,0x0F,0x80,0xF0,0x01,0x00,0xF8,0x0F,0x00,0xF8,0x0F,0xC0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x10,0xC1,0x18,0x03,0x01,0x0C,0x11,0x81,0x0C,0x18,0x60,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0xF0,0x20,0x63,0x0C,0x1F,0x02,0x06,0x30,0xC2,0x06,0x30,0x30,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0
x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x30,0x63,0x0C,0x03,0x03,0x06,0x30,0xC3,0x06,0x30,0x30,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x30,0x63,0x0C,0x03,0x03,0x06,0x30,0xC3,0x06,0x30,0x30,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x00,0x60,0x0C,0x03,0x00,0x06,0x00,0xC0,0x06,0x38,0x20,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x00,0xC0,0x18,0x03,0x00,0x0C,0x01,0x80,0x0C,0x1E,0x60,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x00,0xC0,0x70,0x03,0x00,0x0C,0x07,0x00,0x0C,0x07,0x80,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x01,0x80,0x0C,0x03,0x00,0x18,0x00,0xC0,0x18,0x19,0xC0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x02,0x00,0x04,0x03,0x00,0x20,0x00,0x40,0x20,0x10,0x60,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x04,0x00,0x06,0x03,0x00,0x40,0x00,0x60,0x40,0x30,0x30,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x08,0x23,0x06,0x03,0x00,0x82,0x30,0x60,0x82,0x30,0x30,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x10,0x23,0x06,0x03,0x01,0x02,0x30,0x61,0x02,0x30,0x30,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0
x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x20,0x23,0x06,0x03,0x02,0x02,0x30,0x62,0x02,0x30,0x30,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x3F,0xE1,0x0C,0x03,0x03,0xFE,0x10,0xC3,0xFE,0x18,0x60,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0xFE,0x3F,0xE0,0xF8,0x1F,0xE3,0xFE,0x0F,0x83,0xFE,0x07,0xC0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0
x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0
x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0
x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0
x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0
x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0
x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00]
# 构造方法
def __init__(self, ser, num):
self.ser = ser
self.num = num
# 播放
def setbreak(self):
file = open("static/call.mp3", "rb")
row = file.read(10240)
self.ser.timeout = 0.5
cmd = b'AT+FSCREATE="call.mp3"\r\n'
self.log.logger.debug(f"发→◇ {cmd.decode(encoding='GB2312')}")
self.ser.write(cmd)
self.log.logger.debug(f"收←◆ {self.ser.read(200).decode(encoding='GB2312')}")
cmd = b'AT+FSWRITE="call.mp3",0,10240,20\r\n'
self.log.logger.debug(f"发→◇ {cmd.decode(encoding='GB2312')}")
self.ser.write(cmd)
self.log.logger.debug(f"收←◆ {self.ser.read(200).decode(encoding='GB2312')}")
cmd = b'%s' % row
self.ser.write(cmd)
self.log.logger.debug(f"收←◆ {self.ser.read(200).decode(encoding='GB2312')}")
# 动联专有命令
cmd = b'ATI\r\n'
self.ser.write(cmd)
editionDL = self.ser.read(200).decode(encoding='GB2312')
editionDL = re.search(r'DL', editionDL)
if editionDL:
cmd = b'AT+CCAM=0\r\n'
self.log.logger.debug(f"发→◇ {cmd.decode(encoding='GB2312')}")
self.ser.write(cmd)
self.log.logger.debug(f"收←◆ {self.ser.read(200).decode(encoding='GB2312')}")
cmd = b'AT+CCAM=2\r\n'
self.log.logger.debug(f"发→◇ {cmd.decode(encoding='GB2312')}")
self.ser.write(cmd)
self.log.logger.debug(f"收←◆ {self.ser.read(200).decode(encoding='GB2312')}")
cmd = b'AT+REQ=4,254,17\r\n'
self.log.logger.debug(f"发→◇ {cmd.decode(encoding='GB2312')}")
self.ser.write(cmd)
self.log.logger.debug(f"收←◆ {self.ser.read(200).decode(encoding='GB2312')}")
cmd = bytes.fromhex('c1 e8 c1 e9 0D 0A')
self.ser.write(cmd)
self.log.logger.debug(f"收←◆ {str.upper(binascii.b2a_hex(self.ser.read(200)).decode(encoding='GB2312'))}")
cmd = b'AT+JBIG=5760,320,144\r\n'
self.log.logger.debug(f"发→◇ {cmd.decode(encoding='GB2312')}")
self.ser.write(cmd)
self.log.logger.debug(f"收←◆ {self.ser.read(200).decode(encoding='GB2312')}")
self.ser.write(bytes(self.lists))
self.ser.timeout = 2
self.log.logger.debug(f"收←◆ {str.upper(binascii.b2a_hex(self.ser.read(300)).decode(encoding='GB2312'))}")
cmd = b'AT+QRENCODE=0,0,15,"abcde12345ABCDE2345667890"\r\n'
self.log.logger.debug(f"发→◇ {cmd.decode(encoding='GB2312')}")
self.ser.write(cmd)
self.log.logger.debug(f"收←◆ {str.upper(binascii.b2a_hex(self.ser.read(200)).decode(encoding='GB2312'))}")
cmd = b'AT+CCAM=3\r\n'
self.log.logger.debug(f"发→◇ {cmd.decode(encoding='GB2312')}")
self.ser.write(cmd)
self.log.logger.debug(f"收←◆ {self.ser.read(200).decode(encoding='GB2312')}")
cmd = b'AT+CCAM=1\r\n'
self.log.logger.debug(f"发→◇ {cmd.decode(encoding='GB2312')}")
self.ser.write(cmd)
self.log.logger.debug(f"收←◆ {self.ser.read(200).decode(encoding='GB2312')}")
else:
pass
# 播放音频
cmd = b'AT+CAUDPLAY=1,"call.mp3"\r\n'
self.log.logger.debug(f"发→◇ {cmd.decode(encoding='GB2312')}")
self.ser.write(cmd)
self.log.logger.debug(f"收←◆ {self.ser.read(200).decode(encoding='GB2312')}")
if self.num >= 1:
#获取upload文件大小
cmd = b'AT+FTPSIZE\r\n'
self.log.logger.debug(f"发→◇ {cmd.decode(encoding='GB2312')}")
self.ser.write(cmd)
self.ser.timeout = 5
self.uploadfile = self.ser.read(200).decode(encoding='GB2312')
uploadfile = re.search(r'10240', self.uploadfile).group()
self.ser.timeout = 0.5
cmd = b'AT\r\n'
self.log.logger.debug(f"发→◇ {cmd.decode(encoding='GB2312')}")
self.ser.write(cmd)
self.log.logger.debug(f"收←◆ {self.ser.read(200).decode(encoding='GB2312')}")
# 获取download文件大小
cmd = b'AT+FTPGETTOFS=0,"call.mp3"\r\n'
self.log.logger.debug(f"发→◇ {cmd.decode(encoding='GB2312')}")
self.ser.write(cmd)
self.ser.timeout = 25
self.downloadfile = self.ser.read(200).decode(encoding='GB2312')
downloadfile = re.search(r'10240', self.downloadfile).group()
if int(len(row)) == int(uploadfile) == int(downloadfile):
self.log.logger.debug(u'FTP上传下载第%d次' % self.num)
self.log.logger.debug(u'本次FTP上传下载成功')
else:
self.log.logger.debug(u'FTP上传下载第%d次' % self.num)
self.log.logger.debug(u'本次FTP上传下载失败')
self.ser.timeout = 0.5
cmd = b'AT+SAPBR=0,1\r\n'
self.log.logger.debug(f"发→◇ {cmd.decode(encoding='GB2312')}")
self.ser.write(cmd)
self.log.logger.debug(f"收←◆ {self.ser.read(200).decode(encoding='GB2312')}")
def Ftp_play(ser, num):
    """Run one FTP upload/download/play cycle on the given serial port.

    Args:
        ser: open serial-port handle, forwarded to Ftp_play_upload.
        num: iteration counter, forwarded to Ftp_play_upload.
    """
    try:
        test = Ftp_play_upload(ser, num)
        test.setbreak()
    except KeyboardInterrupt:
        # User aborted with Ctrl-C: report and terminate the whole script.
        print("exit...")
        sys.exit()
    except Exception as e:
        # Best-effort diagnostics: log the error plus the full traceback
        # instead of crashing, so the surrounding test loop can continue.
        print(e)
        print("---------------")
        print(traceback.format_exc())
| 258.18797
| 28,813
| 0.760768
| 6,556
| 34,339
| 3.991611
| 0.025778
| 1.661202
| 2.476212
| 3.280828
| 0.943521
| 0.941228
| 0.938553
| 0.934656
| 0.931981
| 0.930605
| 0
| 0.527117
| 0.041527
| 34,339
| 132
| 28,814
| 260.143939
| 0.266217
| 0.001514
| 0
| 0.460177
| 0
| 0.026549
| 0.052919
| 0.040987
| 0
| 1
| 0.672132
| 0
| 0
| 1
| 0.026549
| false
| 0.00885
| 0.044248
| 0
| 0.115044
| 0.035398
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 15
|
4a59b49400429e1cc596cf66cf1ff84624ea336b
| 200
|
py
|
Python
|
hcli_core/hutils.py
|
cometaj2/hcli_core
|
5363cb81843ddec41246e7fcfac7dfcce8bf4b8c
|
[
"MIT"
] | null | null | null |
hcli_core/hutils.py
|
cometaj2/hcli_core
|
5363cb81843ddec41246e7fcfac7dfcce8bf4b8c
|
[
"MIT"
] | null | null | null |
hcli_core/hutils.py
|
cometaj2/hcli_core
|
5363cb81843ddec41246e7fcfac7dfcce8bf4b8c
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, division, print_function
import sys
# helps with printing error messages to STDERR
def eprint(*args, **kwargs):
    """Print *args* to standard error, forwarding any print() keyword options."""
    stream = sys.stderr
    print(*args, file=stream, **kwargs)
| 25
| 64
| 0.755
| 27
| 200
| 5.37037
| 0.740741
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 200
| 7
| 65
| 28.571429
| 0.852941
| 0.22
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0.75
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 7
|
4a5fe289f03046891907bccf144160ba3659b863
| 17,474
|
py
|
Python
|
magenta/models/polyphony_rnn/polyphony_lib_test.py
|
nkjulia/magenta
|
063d320d59276a15afa0f8a3a8d386ad74594070
|
[
"Apache-2.0"
] | 2,785
|
2020-06-05T03:00:48.000Z
|
2022-03-31T20:59:43.000Z
|
magenta/models/polyphony_rnn/polyphony_lib_test.py
|
nkjulia/magenta
|
063d320d59276a15afa0f8a3a8d386ad74594070
|
[
"Apache-2.0"
] | 242
|
2020-06-04T18:35:42.000Z
|
2022-03-30T09:14:18.000Z
|
magenta/models/polyphony_rnn/polyphony_lib_test.py
|
nkjulia/magenta
|
063d320d59276a15afa0f8a3a8d386ad74594070
|
[
"Apache-2.0"
] | 745
|
2020-06-05T02:32:45.000Z
|
2022-03-30T04:44:20.000Z
|
# Copyright 2021 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for polyphony_lib."""
import copy
from magenta.common import testing_lib as common_testing_lib
from magenta.models.polyphony_rnn import polyphony_lib
from note_seq import sequences_lib
from note_seq import testing_lib
from note_seq.protobuf import music_pb2
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
class PolyphonyLibTest(tf.test.TestCase):
  """Unit tests for polyphony_lib's PolyphonicSequence encode/decode logic."""

  def setUp(self):
    self.maxDiff = None  # pylint:disable=invalid-name

    # Minimal NoteSequence proto (60 qpm) used as the base for every test.
    self.note_sequence = common_testing_lib.parse_test_proto(
        music_pb2.NoteSequence,
        """
tempos: {
qpm: 60
}
ticks_per_quarter: 220
""")

  def testFromQuantizedNoteSequence(self):
    """Quantized NoteSequence converts to the expected polyphonic event list."""
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(60, 100, 0.0, 4.0), (64, 100, 0.0, 3.0), (67, 100, 1.0, 2.0)])
    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)
    poly_seq = list(polyphony_lib.PolyphonicSequence(quantized_sequence))

    pe = polyphony_lib.PolyphonicEvent
    expected_poly_seq = [
        pe(pe.START, None),
        # step 0
        pe(pe.NEW_NOTE, 64),
        pe(pe.NEW_NOTE, 60),
        pe(pe.STEP_END, None),
        # step 1
        pe(pe.NEW_NOTE, 67),
        pe(pe.CONTINUED_NOTE, 64),
        pe(pe.CONTINUED_NOTE, 60),
        pe(pe.STEP_END, None),
        # step 2
        pe(pe.CONTINUED_NOTE, 64),
        pe(pe.CONTINUED_NOTE, 60),
        pe(pe.STEP_END, None),
        # step 3
        pe(pe.CONTINUED_NOTE, 60),
        pe(pe.STEP_END, None),
        pe(pe.END, None),
    ]
    self.assertEqual(expected_poly_seq, poly_seq)

  def testToSequence(self):
    """Round-trip: NoteSequence -> PolyphonicSequence -> NoteSequence."""
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(60, 100, 0.0, 4.0), (64, 100, 0.0, 3.0), (67, 100, 1.0, 2.0)])
    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)
    poly_seq = polyphony_lib.PolyphonicSequence(quantized_sequence)

    poly_seq_ns = poly_seq.to_sequence(qpm=60.0)

    # Make comparison easier
    poly_seq_ns.notes.sort(key=lambda n: (n.start_time, n.pitch))
    self.note_sequence.notes.sort(key=lambda n: (n.start_time, n.pitch))

    self.assertEqual(self.note_sequence, poly_seq_ns)

  def testToSequenceWithContinuedNotesNotStarted(self):
    """CONTINUED_NOTE events for pitches never started are ignored."""
    poly_seq = polyphony_lib.PolyphonicSequence(steps_per_quarter=1)
    pe = polyphony_lib.PolyphonicEvent
    poly_events = [
        # step 0
        pe(pe.NEW_NOTE, 60),
        pe(pe.NEW_NOTE, 64),
        pe(pe.STEP_END, None),
        # step 1
        pe(pe.CONTINUED_NOTE, 60),
        pe(pe.CONTINUED_NOTE, 64),
        pe(pe.CONTINUED_NOTE, 67),  # Was not started, should be ignored.
        pe(pe.STEP_END, None),
        pe(pe.END, None),
    ]
    for event in poly_events:
      poly_seq.append(event)

    poly_seq_ns = poly_seq.to_sequence(qpm=60.0)

    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(60, 100, 0.0, 2.0), (64, 100, 0.0, 2.0)])

    # Make comparison easier
    poly_seq_ns.notes.sort(key=lambda n: (n.start_time, n.pitch))
    self.note_sequence.notes.sort(key=lambda n: (n.start_time, n.pitch))

    self.assertEqual(self.note_sequence, poly_seq_ns)

  def testToSequenceWithExtraEndEvents(self):
    """END events occurring before the true end of sequence are ignored."""
    poly_seq = polyphony_lib.PolyphonicSequence(steps_per_quarter=1)
    pe = polyphony_lib.PolyphonicEvent
    poly_events = [
        # step 0
        pe(pe.NEW_NOTE, 60),
        pe(pe.END, None),  # END event before end. Should be ignored.
        pe(pe.NEW_NOTE, 64),
        pe(pe.END, None),  # END event before end. Should be ignored.
        pe(pe.STEP_END, None),
        pe(pe.END, None),  # END event before end. Should be ignored.
        # step 1
        pe(pe.CONTINUED_NOTE, 60),
        pe(pe.END, None),  # END event before end. Should be ignored.
        pe(pe.CONTINUED_NOTE, 64),
        pe(pe.END, None),  # END event before end. Should be ignored.
        pe(pe.STEP_END, None),
        pe(pe.END, None),
    ]
    for event in poly_events:
      poly_seq.append(event)

    poly_seq_ns = poly_seq.to_sequence(qpm=60.0)

    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(60, 100, 0.0, 2.0), (64, 100, 0.0, 2.0)])

    # Make comparison easier
    poly_seq_ns.notes.sort(key=lambda n: (n.start_time, n.pitch))
    self.note_sequence.notes.sort(key=lambda n: (n.start_time, n.pitch))

    self.assertEqual(self.note_sequence, poly_seq_ns)

  def testToSequenceWithUnfinishedSequence(self):
    """Converting a sequence missing STEP_END/END raises ValueError."""
    poly_seq = polyphony_lib.PolyphonicSequence(steps_per_quarter=1)
    pe = polyphony_lib.PolyphonicEvent
    poly_events = [
        # step 0
        pe(pe.NEW_NOTE, 60),
        pe(pe.NEW_NOTE, 64),
        # missing STEP_END and END events at end of sequence.
    ]
    for event in poly_events:
      poly_seq.append(event)

    with self.assertRaises(ValueError):
      poly_seq.to_sequence(qpm=60.0)

  def testToSequenceWithRepeatedNotes(self):
    """A NEW_NOTE on an already-sounding pitch starts a new note."""
    poly_seq = polyphony_lib.PolyphonicSequence(steps_per_quarter=1)
    pe = polyphony_lib.PolyphonicEvent
    poly_events = [
        # step 0
        pe(pe.NEW_NOTE, 60),
        pe(pe.NEW_NOTE, 64),
        pe(pe.STEP_END, None),
        # step 1
        pe(pe.NEW_NOTE, 60),
        pe(pe.CONTINUED_NOTE, 64),
        pe(pe.STEP_END, None),
        pe(pe.END, None),
    ]
    for event in poly_events:
      poly_seq.append(event)

    poly_seq_ns = poly_seq.to_sequence(qpm=60.0)

    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(60, 100, 0.0, 1.0), (64, 100, 0.0, 2.0), (60, 100, 1.0, 2.0)])

    # Make comparison easier
    poly_seq_ns.notes.sort(key=lambda n: (n.start_time, n.pitch))
    self.note_sequence.notes.sort(key=lambda n: (n.start_time, n.pitch))

    self.assertEqual(self.note_sequence, poly_seq_ns)

  def testToSequenceWithBaseNoteSequence(self):
    """Generated notes are appended after an existing base NoteSequence."""
    poly_seq = polyphony_lib.PolyphonicSequence(
        steps_per_quarter=1, start_step=1)
    pe = polyphony_lib.PolyphonicEvent
    poly_events = [
        # step 0
        pe(pe.NEW_NOTE, 60),
        pe(pe.NEW_NOTE, 64),
        pe(pe.STEP_END, None),
        # step 1
        pe(pe.CONTINUED_NOTE, 60),
        pe(pe.CONTINUED_NOTE, 64),
        pe(pe.STEP_END, None),
        pe(pe.END, None),
    ]
    for event in poly_events:
      poly_seq.append(event)

    base_seq = copy.deepcopy(self.note_sequence)
    testing_lib.add_track_to_sequence(
        base_seq, 0, [(60, 100, 0.0, 1.0)])

    poly_seq_ns = poly_seq.to_sequence(qpm=60.0, base_note_sequence=base_seq)

    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(60, 100, 0.0, 1.0), (60, 100, 1.0, 3.0), (64, 100, 1.0, 3.0)])

    # Make comparison easier
    poly_seq_ns.notes.sort(key=lambda n: (n.start_time, n.pitch))
    self.note_sequence.notes.sort(key=lambda n: (n.start_time, n.pitch))

    self.assertEqual(self.note_sequence, poly_seq_ns)

  def testToSequenceWithEmptySteps(self):
    """Silence-only steps produce no notes but still advance total_time."""
    poly_seq = polyphony_lib.PolyphonicSequence(
        steps_per_quarter=1)
    pe = polyphony_lib.PolyphonicEvent
    poly_events = [
        # step 0
        pe(pe.STEP_END, None),
        # step 1
        pe(pe.STEP_END, None),
        pe(pe.END, None),
    ]
    for event in poly_events:
      poly_seq.append(event)

    poly_seq_ns = poly_seq.to_sequence(qpm=60.0)

    self.note_sequence.total_time = 2

    self.assertEqual(self.note_sequence, poly_seq_ns)

  def testSetLengthAddSteps(self):
    """set_length pads with silent steps and keeps END as the last event."""
    poly_seq = polyphony_lib.PolyphonicSequence(steps_per_quarter=1)
    poly_seq.set_length(5)

    self.assertEqual(5, poly_seq.num_steps)
    self.assertListEqual([0, 0, 1, 2, 3, 4, 5], poly_seq.steps)

    pe = polyphony_lib.PolyphonicEvent
    poly_events = [
        pe(pe.START, None),
        pe(pe.STEP_END, None),
        pe(pe.STEP_END, None),
        pe(pe.STEP_END, None),
        pe(pe.STEP_END, None),
        pe(pe.STEP_END, None),
        pe(pe.END, None),
    ]
    self.assertEqual(poly_events, list(poly_seq))

    # Add 5 more steps to make sure END is managed properly.
    poly_seq.set_length(10)

    self.assertEqual(10, poly_seq.num_steps)
    self.assertListEqual([0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], poly_seq.steps)

    pe = polyphony_lib.PolyphonicEvent
    poly_events = [
        pe(pe.START, None),
        pe(pe.STEP_END, None),
        pe(pe.STEP_END, None),
        pe(pe.STEP_END, None),
        pe(pe.STEP_END, None),
        pe(pe.STEP_END, None),
        pe(pe.STEP_END, None),
        pe(pe.STEP_END, None),
        pe(pe.STEP_END, None),
        pe(pe.STEP_END, None),
        pe(pe.STEP_END, None),
        pe(pe.END, None),
    ]
    self.assertEqual(poly_events, list(poly_seq))

  def testSetLengthAddStepsToSequenceWithoutEnd(self):
    """set_length appends the missing END event when absent."""
    poly_seq = polyphony_lib.PolyphonicSequence(steps_per_quarter=1)

    # Construct a list with one silence step and no END.
    pe = polyphony_lib.PolyphonicEvent
    poly_seq.append(pe(pe.STEP_END, None))

    poly_seq.set_length(2)
    poly_events = [
        pe(pe.START, None),
        pe(pe.STEP_END, None),
        pe(pe.STEP_END, None),
        pe(pe.END, None),
    ]
    self.assertEqual(poly_events, list(poly_seq))

  def testSetLengthAddStepsToSequenceWithUnfinishedStep(self):
    """set_length closes a trailing incomplete step before padding."""
    poly_seq = polyphony_lib.PolyphonicSequence(steps_per_quarter=1)

    # Construct a list with one note and no STEP_END or END.
    pe = polyphony_lib.PolyphonicEvent
    poly_seq.append(pe(pe.NEW_NOTE, 60))

    poly_seq.set_length(2)
    poly_events = [
        pe(pe.START, None),
        pe(pe.NEW_NOTE, 60),
        pe(pe.STEP_END, None),
        pe(pe.STEP_END, None),
        pe(pe.END, None),
    ]
    self.assertEqual(poly_events, list(poly_seq))

  def testSetLengthRemoveSteps(self):
    """set_length truncates steps from the end, down to zero."""
    poly_seq = polyphony_lib.PolyphonicSequence(steps_per_quarter=1)

    pe = polyphony_lib.PolyphonicEvent
    poly_events = [
        # step 0
        pe(pe.NEW_NOTE, 60),
        pe(pe.STEP_END, None),
        # step 1
        pe(pe.NEW_NOTE, 64),
        pe(pe.STEP_END, None),
        # step 2
        pe(pe.NEW_NOTE, 67),
        pe(pe.STEP_END, None),
        pe(pe.END, None),
    ]
    for event in poly_events:
      poly_seq.append(event)

    poly_seq.set_length(2)
    poly_events = [
        pe(pe.START, None),
        # step 0
        pe(pe.NEW_NOTE, 60),
        pe(pe.STEP_END, None),
        # step 1
        pe(pe.NEW_NOTE, 64),
        pe(pe.STEP_END, None),
        pe(pe.END, None),
    ]
    self.assertEqual(poly_events, list(poly_seq))

    poly_seq.set_length(1)
    poly_events = [
        pe(pe.START, None),
        # step 0
        pe(pe.NEW_NOTE, 60),
        pe(pe.STEP_END, None),
        pe(pe.END, None),
    ]
    self.assertEqual(poly_events, list(poly_seq))

    poly_seq.set_length(0)
    poly_events = [
        pe(pe.START, None),
        pe(pe.END, None),
    ]
    self.assertEqual(poly_events, list(poly_seq))

  def testSetLengthRemoveStepsFromSequenceWithoutEnd(self):
    """Truncation works on a sequence that never had an END event."""
    poly_seq = polyphony_lib.PolyphonicSequence(steps_per_quarter=1)

    # Construct a list with two silence steps and no END.
    pe = polyphony_lib.PolyphonicEvent
    poly_seq.append(pe(pe.STEP_END, None))
    poly_seq.append(pe(pe.STEP_END, None))

    poly_seq.set_length(1)
    poly_events = [
        pe(pe.START, None),
        pe(pe.STEP_END, None),
        pe(pe.END, None),
    ]
    self.assertEqual(poly_events, list(poly_seq))

  def testSetLengthRemoveStepsFromSequenceWithUnfinishedStep(self):
    """Truncation drops a trailing incomplete step entirely."""
    poly_seq = polyphony_lib.PolyphonicSequence(steps_per_quarter=1)

    # Construct a list with a silence step, a new note, and no STEP_END or END.
    pe = polyphony_lib.PolyphonicEvent
    poly_seq.append(pe(pe.STEP_END, None))
    poly_seq.append(pe(pe.NEW_NOTE, 60))

    poly_seq.set_length(1)
    poly_events = [
        pe(pe.START, None),
        pe(pe.STEP_END, None),
        pe(pe.END, None),
    ]
    self.assertEqual(poly_events, list(poly_seq))

  def testNumSteps(self):
    """num_steps counts completed steps; steps maps events to step indices."""
    poly_seq = polyphony_lib.PolyphonicSequence(steps_per_quarter=1)

    pe = polyphony_lib.PolyphonicEvent
    poly_events = [
        # step 0
        pe(pe.NEW_NOTE, 60),
        pe(pe.NEW_NOTE, 64),
        pe(pe.STEP_END, None),
        # step 1
        pe(pe.CONTINUED_NOTE, 60),
        pe(pe.CONTINUED_NOTE, 64),
        pe(pe.STEP_END, None),
        pe(pe.END, None),
    ]
    for event in poly_events:
      poly_seq.append(event)

    self.assertEqual(2, poly_seq.num_steps)
    self.assertListEqual([0, 0, 0, 0, 1, 1, 1, 2], poly_seq.steps)

  def testNumStepsIncompleteStep(self):
    """A trailing step without STEP_END is not counted in num_steps."""
    poly_seq = polyphony_lib.PolyphonicSequence(steps_per_quarter=1)

    pe = polyphony_lib.PolyphonicEvent
    poly_events = [
        # step 0
        pe(pe.NEW_NOTE, 60),
        pe(pe.NEW_NOTE, 64),
        pe(pe.STEP_END, None),
        # step 1
        pe(pe.CONTINUED_NOTE, 60),
        pe(pe.CONTINUED_NOTE, 64),
        pe(pe.STEP_END, None),
        # incomplete step. should not be counted.
        pe(pe.NEW_NOTE, 72),
    ]
    for event in poly_events:
      poly_seq.append(event)

    self.assertEqual(2, poly_seq.num_steps)
    self.assertListEqual([0, 0, 0, 0, 1, 1, 1, 2], poly_seq.steps)

  def testSteps(self):
    """steps indices are offset by start_step."""
    pe = polyphony_lib.PolyphonicEvent
    poly_events = [
        # step 0
        pe(pe.NEW_NOTE, 60),
        pe(pe.NEW_NOTE, 64),
        pe(pe.STEP_END, None),
        # step 1
        pe(pe.CONTINUED_NOTE, 60),
        pe(pe.CONTINUED_NOTE, 64),
        pe(pe.STEP_END, None),
        pe(pe.END, None),
    ]

    poly_seq = polyphony_lib.PolyphonicSequence(steps_per_quarter=1)
    for event in poly_events:
      poly_seq.append(event)
    self.assertListEqual([0, 0, 0, 0, 1, 1, 1, 2], poly_seq.steps)

    poly_seq = polyphony_lib.PolyphonicSequence(
        steps_per_quarter=1, start_step=2)
    for event in poly_events:
      poly_seq.append(event)
    self.assertListEqual([2, 2, 2, 2, 3, 3, 3, 4], poly_seq.steps)

  def testTrimTrailingEndEvents(self):
    """trim_trailing_end_events removes all END events at the tail."""
    poly_seq = polyphony_lib.PolyphonicSequence(steps_per_quarter=1)

    pe = polyphony_lib.PolyphonicEvent
    poly_events = [
        # step 0
        pe(pe.NEW_NOTE, 60),
        pe(pe.STEP_END, None),
        pe(pe.END, None),
        pe(pe.END, None),
    ]
    for event in poly_events:
      poly_seq.append(event)

    poly_seq.trim_trailing_end_events()

    poly_events_expected = [
        pe(pe.START, None),
        # step 0
        pe(pe.NEW_NOTE, 60),
        pe(pe.STEP_END, None),
    ]
    self.assertEqual(poly_events_expected, list(poly_seq))

  def testExtractPolyphonicSequences(self):
    """Extraction honors min_steps_discard / max_steps_discard bounds."""
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0, [(60, 100, 0.0, 4.0)])
    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)

    seqs, _ = polyphony_lib.extract_polyphonic_sequences(quantized_sequence)
    self.assertEqual(1, len(seqs))

    seqs, _ = polyphony_lib.extract_polyphonic_sequences(
        quantized_sequence, min_steps_discard=2, max_steps_discard=5)
    self.assertEqual(1, len(seqs))

    # Too short: discarded by min_steps_discard.
    self.note_sequence.notes[0].end_time = 1.0
    self.note_sequence.total_time = 1.0
    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)
    seqs, _ = polyphony_lib.extract_polyphonic_sequences(
        quantized_sequence, min_steps_discard=3, max_steps_discard=5)
    self.assertEqual(0, len(seqs))

    # Too long: discarded by max_steps_discard.
    self.note_sequence.notes[0].end_time = 10.0
    self.note_sequence.total_time = 10.0
    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)
    seqs, _ = polyphony_lib.extract_polyphonic_sequences(
        quantized_sequence, min_steps_discard=3, max_steps_discard=5)
    self.assertEqual(0, len(seqs))

  def testExtractPolyphonicMultiProgram(self):
    """Sequences mixing multiple programs are not extracted."""
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(60, 100, 0.0, 4.0), (64, 100, 0.0, 3.0), (67, 100, 1.0, 2.0)])
    self.note_sequence.notes[0].program = 2
    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)

    seqs, _ = polyphony_lib.extract_polyphonic_sequences(quantized_sequence)
    self.assertEqual(0, len(seqs))

  def testExtractNonZeroStart(self):
    """Extraction starting past all notes yields nothing."""
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0, [(60, 100, 0.0, 4.0)])
    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)

    seqs, _ = polyphony_lib.extract_polyphonic_sequences(
        quantized_sequence, start_step=4, min_steps_discard=1)
    self.assertEqual(0, len(seqs))
    seqs, _ = polyphony_lib.extract_polyphonic_sequences(
        quantized_sequence, start_step=0, min_steps_discard=1)
    self.assertEqual(1, len(seqs))
if __name__ == '__main__':
  # Allow running this test module directly from the command line.
  tf.test.main()
| 29.667233
| 79
| 0.651711
| 2,520
| 17,474
| 4.288095
| 0.084127
| 0.051823
| 0.039237
| 0.053952
| 0.810476
| 0.793078
| 0.770498
| 0.764298
| 0.762539
| 0.739219
| 0
| 0.037549
| 0.233375
| 17,474
| 588
| 80
| 29.717687
| 0.76911
| 0.088646
| 0
| 0.766585
| 0
| 0
| 0.000507
| 0
| 0
| 0
| 0
| 0
| 0.085995
| 1
| 0.054054
| false
| 0
| 0.017199
| 0
| 0.07371
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4abae12d459706c5c7165cc55d895b0d06b32723
| 97,432
|
py
|
Python
|
ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
|
nexr/ambari
|
8452f207d7b9343a162698f2a2b79bf2c512e9d3
|
[
"Apache-2.0"
] | 1
|
2015-05-04T12:19:05.000Z
|
2015-05-04T12:19:05.000Z
|
ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
|
nexr/ambari
|
8452f207d7b9343a162698f2a2b79bf2c512e9d3
|
[
"Apache-2.0"
] | null | null | null |
ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
|
nexr/ambari
|
8452f207d7b9343a162698f2a2b79bf2c512e9d3
|
[
"Apache-2.0"
] | 1
|
2021-01-07T08:55:01.000Z
|
2021-01-07T08:55:01.000Z
|
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
from unittest import TestCase
from mock.mock import patch, MagicMock
class TestHDP22StackAdvisor(TestCase):
def setUp(self):
import imp
self.maxDiff = None
self.testDirectory = os.path.dirname(os.path.abspath(__file__))
stackAdvisorPath = os.path.join(self.testDirectory, '../../../../../main/resources/stacks/stack_advisor.py')
hdp206StackAdvisorPath = os.path.join(self.testDirectory, '../../../../../main/resources/stacks/HDP/2.0.6/services/stack_advisor.py')
hdp21StackAdvisorPath = os.path.join(self.testDirectory, '../../../../../main/resources/stacks/HDP/2.1/services/stack_advisor.py')
hdp22StackAdvisorPath = os.path.join(self.testDirectory, '../../../../../main/resources/stacks/HDP/2.2/services/stack_advisor.py')
hdp22StackAdvisorClassName = 'HDP22StackAdvisor'
with open(stackAdvisorPath, 'rb') as fp:
imp.load_module('stack_advisor', fp, stackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
with open(hdp206StackAdvisorPath, 'rb') as fp:
imp.load_module('stack_advisor_impl', fp, hdp206StackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
with open(hdp21StackAdvisorPath, 'rb') as fp:
imp.load_module('stack_advisor_impl', fp, hdp21StackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
with open(hdp22StackAdvisorPath, 'rb') as fp:
stack_advisor_impl = imp.load_module('stack_advisor_impl', fp, hdp22StackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
clazz = getattr(stack_advisor_impl, hdp22StackAdvisorClassName)
self.stackAdvisor = clazz()
# substitute method in the instance
self.get_system_min_uid_real = self.stackAdvisor.get_system_min_uid
self.stackAdvisor.get_system_min_uid = self.get_system_min_uid_magic
@patch('__builtin__.open')
@patch('os.path.exists')
def get_system_min_uid_magic(self, exists_mock, open_mock):
class MagicFile(object):
def read(self):
return """
#test line UID_MIN 200
UID_MIN 500
"""
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def __enter__(self):
return self
exists_mock.return_value = True
open_mock.return_value = MagicFile()
return self.get_system_min_uid_real()
def test_recommendTezConfigurations(self):
configurations = {
"yarn-site": {
"properties": {
"yarn.scheduler.minimum-allocation-mb": "256",
"yarn.scheduler.maximum-allocation-mb": "2048",
},
}
}
clusterData = {
"mapMemory": 3000,
"amMemory": 2000,
"reduceMemory": 2056,
"containers": 3,
"ramPerContainer": 256
}
expected = {
"tez-site": {
"properties": {
"tez.am.resource.memory.mb": "4000",
"tez.task.resource.memory.mb": "768",
"tez.runtime.io.sort.mb": "307",
"tez.runtime.unordered.output.buffer.size-mb": "57"
}
},
'yarn-site': {
'properties': {
'yarn.scheduler.minimum-allocation-mb': '256',
'yarn.scheduler.maximum-allocation-mb': '2048'
}
}
}
self.stackAdvisor.recommendTezConfigurations(configurations, clusterData, None, None)
self.assertEquals(configurations, expected)
def test_recommendTezConfigurations_amMemoryMoreThan3072(self):
configurations = {
"yarn-site": {
"properties": {
"yarn.scheduler.minimum-allocation-mb": "256",
"yarn.scheduler.maximum-allocation-mb": "2048",
},
}
}
clusterData = {
"mapMemory": 4000,
"amMemory": 3100,
"reduceMemory": 2056,
"containers": 3,
"ramPerContainer": 256
}
expected = {
"tez-site": {
"properties": {
"tez.am.resource.memory.mb": "3100",
"tez.task.resource.memory.mb": "768",
"tez.runtime.io.sort.mb": "307",
"tez.runtime.unordered.output.buffer.size-mb": "57"
}
},
'yarn-site': {
'properties': {
'yarn.scheduler.minimum-allocation-mb': '256',
'yarn.scheduler.maximum-allocation-mb': '2048'
}
}
}
self.stackAdvisor.recommendTezConfigurations(configurations, clusterData, None, None)
self.assertEquals(configurations, expected)
def test_recommendTezConfigurations_mapMemoryLessThan768(self):
configurations = {
"yarn-site": {
"properties": {
"yarn.scheduler.minimum-allocation-mb": "256",
"yarn.scheduler.maximum-allocation-mb": "2048",
},
}
}
clusterData = {
"mapMemory": 760,
"amMemory": 2000,
"reduceMemory": 760,
"containers": 3,
"ramPerContainer": 256
}
expected = {
"tez-site": {
"properties": {
"tez.am.resource.memory.mb": "4000",
"tez.task.resource.memory.mb": "760",
"tez.runtime.io.sort.mb": "304",
"tez.runtime.unordered.output.buffer.size-mb": "57"
}
},
'yarn-site': {
'properties': {
'yarn.scheduler.minimum-allocation-mb': '256',
'yarn.scheduler.maximum-allocation-mb': '2048'
}
}
}
self.stackAdvisor.recommendTezConfigurations(configurations, clusterData, None, None)
self.assertEquals(configurations, expected)
def test_validateHDFSConfigurations(self):
recommendedDefaults = None
unsecure_cluster_core_site = {
'hadoop.security.authentication': 'simple',
'hadoop.security.authorization': 'false',
}
secure_cluster_core_site = {
'hadoop.security.authentication': 'kerberos',
'hadoop.security.authorization': 'true',
}
# TEST CASE: Unsecured cluster, secure ports
properties = { # hdfs-site
'dfs.datanode.address': '0.0.0.0:1019',
'dfs.datanode.http.address': '0.0.0.0:1022',
}
configurations = {
'hdfs-site': {
'properties': properties,
},
'core-site': {
'properties': unsecure_cluster_core_site
},
'ranger-hdfs-plugin-properties':{
'properties': {'ranger-hdfs-plugin-enabled':'Yes'}
}
}
services = {"services":
[{"StackServices":
{"service_name" : "HDFS",
"service_version" : "2.6.0.2.2",
}
}]
}
expected = [] # No warnings
validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
self.assertEquals(validation_problems, expected)
# TEST CASE: Unsecured cluster, unsecure ports
properties = { # hdfs-site
'dfs.datanode.address': '0.0.0.0:55555',
'dfs.datanode.http.address': '0.0.0.0:55555',
}
configurations = {
'hdfs-site': {
'properties': properties,
},
'core-site': {
'properties': unsecure_cluster_core_site
},
'ranger-hdfs-plugin-properties': {
'properties':{
'ranger-hdfs-plugin-enabled':'Yes'
}
}
}
expected = [] # No warnings
services = {"services":
[{"StackServices":
{"service_name" : "HDFS",
"service_version" : "2.6.0.2.2",
}
}]
}
validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
self.assertEquals(validation_problems, expected)
# TEST CASE: Secure cluster, invalid dfs.http.policy value
properties = { # hdfs-site
'dfs.http.policy': 'WRONG_VALUE',
'dfs.datanode.address': '0.0.0.0:1019',
'dfs.datanode.http.address': '0.0.0.0:1022',
}
configurations = {
'hdfs-site': {
'properties': properties,
},
'core-site': {
'properties': secure_cluster_core_site
},
'ranger-hdfs-plugin-properties': {
'properties':{
'ranger-hdfs-plugin-enabled':'Yes'
}
}
}
expected = [{'config-name': 'dfs.http.policy',
'config-type': 'hdfs-site',
'level': 'WARN',
'message': "Invalid property value: WRONG_VALUE. Valid values are ['HTTP_ONLY', 'HTTPS_ONLY', 'HTTP_AND_HTTPS']",
'type': 'configuration'}]
services = {"services":
[{"StackServices":
{"service_name" : "HDFS",
"service_version" : "2.6.0.2.2",
}
}]
}
validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
self.assertEquals(validation_problems, expected)
# TEST CASE: Secure cluster, dfs.http.policy=HTTPS_ONLY, https address not defined
properties = { # hdfs-site
'dfs.http.policy': 'HTTPS_ONLY',
'dfs.datanode.address': '0.0.0.0:1019',
}
configurations = {
'hdfs-site': {
'properties': properties,
},
'core-site': {
'properties': secure_cluster_core_site
},
'ranger-hdfs-plugin-properties': {
'properties':{
'ranger-hdfs-plugin-enabled':'Yes'
}
}
}
expected = [ ]
services = {"services":
[{"StackServices":
{"service_name" : "HDFS",
"service_version" : "2.6.0.2.2",
}
}]
}
validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
self.assertEquals(validation_problems, expected)
# TEST CASE: Secure cluster, dfs.http.policy=HTTPS_ONLY, https address defined and secure
properties = { # hdfs-site
'dfs.http.policy': 'HTTPS_ONLY',
'dfs.datanode.address': '0.0.0.0:1019',
'dfs.datanode.https.address': '0.0.0.0:1022',
}
configurations = {
'hdfs-site': {
'properties': properties,
},
'core-site': {
'properties': secure_cluster_core_site
},
'ranger-hdfs-plugin-properties': {
'properties':{
'ranger-hdfs-plugin-enabled':'Yes'
}
}
}
expected = []
services = {"services":
[{"StackServices":
{"service_name" : "HDFS",
"service_version" : "2.6.0.2.2",
}
}]
}
validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
self.assertEquals(validation_problems, expected)
# TEST CASE: Secure cluster, dfs.http.policy=HTTPS_ONLY, https address defined and non secure
properties = { # hdfs-site
'dfs.http.policy': 'HTTPS_ONLY',
'dfs.datanode.address': '0.0.0.0:1019',
'dfs.datanode.https.address': '0.0.0.0:50475',
}
configurations = {
'hdfs-site': {
'properties': properties,
},
'core-site': {
'properties': secure_cluster_core_site
},
'ranger-hdfs-plugin-properties': {
'properties':{
'ranger-hdfs-plugin-enabled':'Yes'
}
}
}
expected = []
services = {"services":
[{"StackServices":
{"service_name" : "HDFS",
"service_version" : "2.6.0.2.2",
}
}]
}
validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
self.assertEquals(validation_problems, expected)
# TEST CASE: Secure cluster, dfs.http.policy=HTTPS_ONLY, non secure dfs port, https property not defined
properties = { # hdfs-site
'dfs.http.policy': 'HTTPS_ONLY',
'dfs.datanode.address': '0.0.0.0:50010',
}
configurations = {
'hdfs-site': {
'properties': properties,
},
'core-site': {
'properties': secure_cluster_core_site
},
'ranger-hdfs-plugin-properties': {
'properties':{
'ranger-hdfs-plugin-enabled':'Yes'
}
}
}
expected = [{'config-name': 'dfs.datanode.address',
'config-type': 'hdfs-site',
'level': 'WARN',
'message': "You set up datanode to use some non-secure ports. "
"If you want to run Datanode under non-root user in "
"a secure cluster, you should set all these properties "
"['dfs.datanode.address', 'dfs.datanode.https.address'] "
"to use non-secure ports (if property "
"dfs.datanode.https.address does not exist, just add it). "
"You may also set up property dfs.data.transfer.protection "
"('authentication' is a good default value). Also, set up "
"WebHDFS with SSL as described in manual in order to "
"be able to use HTTPS.",
'type': 'configuration'},
{'config-name': 'dfs.datanode.https.address',
'config-type': 'hdfs-site',
'level': 'WARN',
'message': "You set up datanode to use some non-secure ports. "
"If you want to run Datanode under non-root user in "
"a secure cluster, you should set all these properties "
"['dfs.datanode.address', 'dfs.datanode.https.address'] "
"to use non-secure ports (if property dfs.datanode.https.address "
"does not exist, just add it). You may also set up property "
"dfs.data.transfer.protection ('authentication' is a good default value). "
"Also, set up WebHDFS with SSL as described in manual in "
"order to be able to use HTTPS.",
'type': 'configuration'}
]
validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
self.assertEquals(validation_problems, expected)
# TEST CASE: Secure cluster, dfs.http.policy=HTTPS_ONLY, non secure dfs port, https defined and secure
properties = { # hdfs-site
'dfs.http.policy': 'HTTPS_ONLY',
'dfs.datanode.address': '0.0.0.0:50010',
'dfs.datanode.https.address': '0.0.0.0:1022',
}
configurations = {
'hdfs-site': {
'properties': properties,
},
'core-site': {
'properties': secure_cluster_core_site
},
'ranger-hdfs-plugin-properties': {
'properties':{
'ranger-hdfs-plugin-enabled':'Yes'
}
}
}
expected = [{'config-name': 'dfs.datanode.address',
'config-type': 'hdfs-site',
'level': 'WARN',
'message': "You set up datanode to use some non-secure ports. "
"If you want to run Datanode under non-root user in "
"a secure cluster, you should set all these properties "
"['dfs.datanode.address', 'dfs.datanode.https.address'] "
"to use non-secure ports (if property dfs.datanode.https.address "
"does not exist, just add it). You may also set up property "
"dfs.data.transfer.protection ('authentication' is a good "
"default value). Also, set up WebHDFS with SSL as described "
"in manual in order to be able to use HTTPS.",
'type': 'configuration'},
{'config-name': 'dfs.datanode.https.address',
'config-type': 'hdfs-site',
'level': 'WARN',
'message': "You set up datanode to use some non-secure ports. "
"If you want to run Datanode under non-root user in "
"a secure cluster, you should set all these properties "
"['dfs.datanode.address', 'dfs.datanode.https.address'] "
"to use non-secure ports (if property dfs.datanode.https.address "
"does not exist, just add it). You may also set up property "
"dfs.data.transfer.protection ('authentication' is a good default value). "
"Also, set up WebHDFS with SSL as described in manual in order to be "
"able to use HTTPS.",
'type': 'configuration'}
]
validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
self.assertEquals(validation_problems, expected)
# TEST CASE: Secure cluster, dfs.http.policy=HTTPS_ONLY, valid non-root configuration
properties = { # hdfs-site
'dfs.http.policy': 'HTTPS_ONLY',
'dfs.datanode.address': '0.0.0.0:50010',
'dfs.datanode.https.address': '0.0.0.0:50475',
'dfs.data.transfer.protection': 'authentication',
}
configurations = {
'hdfs-site': {
'properties': properties,
},
'core-site': {
'properties': secure_cluster_core_site
},
'ranger-hdfs-plugin-properties': {
'properties':{
'ranger-hdfs-plugin-enabled':'Yes'
}
}
}
expected = []
validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
self.assertEquals(validation_problems, expected)
# TEST CASE: Secure cluster, dfs.http.policy=HTTP_ONLY, insecure port
properties = { # hdfs-site
'dfs.http.policy': 'HTTP_ONLY',
'dfs.datanode.address': '0.0.0.0:1019',
'dfs.datanode.http.address': '0.0.0.0:50475',
}
configurations = {
'hdfs-site': {
'properties': properties,
},
'core-site': {
'properties': secure_cluster_core_site
},
'ranger-hdfs-plugin-properties': {
'properties':{
'ranger-hdfs-plugin-enabled':'Yes'
}
}
}
expected = [{'config-name': 'dfs.datanode.address',
'config-type': 'hdfs-site',
'level': 'WARN',
'message': "You have set up datanode to use some non-secure ports, "
"but dfs.http.policy is set to HTTP_ONLY. In a secure cluster, "
"Datanode forbids using non-secure ports if dfs.http.policy is not "
"set to HTTPS_ONLY. Please make sure that properties "
"['dfs.datanode.address', 'dfs.datanode.http.address'] use secure ports.",
'type': 'configuration'},
{'config-name': 'dfs.datanode.http.address',
'config-type': 'hdfs-site',
'level': 'WARN',
'message': "You have set up datanode to use some non-secure ports, "
"but dfs.http.policy is set to HTTP_ONLY. In a secure cluster, "
"Datanode forbids using non-secure ports if dfs.http.policy is not "
"set to HTTPS_ONLY. Please make sure that properties "
"['dfs.datanode.address', 'dfs.datanode.http.address'] use secure ports.",
'type': 'configuration'}
]
validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
self.assertEquals(validation_problems, expected)
# TEST CASE: Secure cluster, dfs.http.policy=HTTP_ONLY, valid configuration
properties = { # hdfs-site
'dfs.http.policy': 'HTTP_ONLY',
'dfs.datanode.address': '0.0.0.0:1019',
'dfs.datanode.http.address': '0.0.0.0:1022',
}
configurations = {
'hdfs-site': {
'properties': properties,
},
'core-site': {
'properties': secure_cluster_core_site
},
'ranger-hdfs-plugin-properties': {
'properties':{
'ranger-hdfs-plugin-enabled':'Yes'
}
}
}
expected = []
validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
self.assertEquals(validation_problems, expected)
# TEST CASE: Secure cluster, absent dfs.http.policy (typical situation)
properties = { # hdfs-site
'dfs.datanode.address': '0.0.0.0:1019',
'dfs.datanode.http.address': '0.0.0.0:1022',
}
configurations = {
'hdfs-site': {
'properties': properties,
},
'core-site': {
'properties': secure_cluster_core_site
},
'ranger-hdfs-plugin-properties': {
'properties':{
'ranger-hdfs-plugin-enabled':'Yes'
}
}
}
expected = []
validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
self.assertEquals(validation_problems, expected)
# TEST CASE: Secure cluster, dfs.http.policy=HTTP_ONLY, misusage of dfs.data.transfer.protection warning
properties = { # hdfs-site
'dfs.http.policy': 'HTTP_ONLY',
'dfs.datanode.address': '0.0.0.0:1019',
'dfs.datanode.http.address': '0.0.0.0:1022',
'dfs.data.transfer.protection': 'authentication',
}
configurations = {
'hdfs-site': {
'properties': properties,
},
'core-site': {
'properties': secure_cluster_core_site
},
'ranger-hdfs-plugin-properties': {
'properties':{
'ranger-hdfs-plugin-enabled':'Yes'
}
}
}
expected = [{'config-name': 'dfs.data.transfer.protection',
'config-type': 'hdfs-site',
'level': 'WARN',
'message': "dfs.data.transfer.protection property can not be used when dfs.http.policy is "
"set to any value other then HTTPS_ONLY. Tip: When dfs.http.policy property is not defined, it defaults to HTTP_ONLY",
'type': 'configuration'}]
validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
self.assertEquals(validation_problems, expected)
# TEST CASE: Secure cluster, dfs.http.policy=HTTPS_ONLY, wrong dfs.data.transfer.protection value
properties = { # hdfs-site
'dfs.http.policy': 'HTTPS_ONLY',
'dfs.datanode.address': '0.0.0.0:50010',
'dfs.datanode.https.address': '0.0.0.0:50475',
'dfs.data.transfer.protection': 'WRONG_VALUE',
}
configurations = {
'hdfs-site': {
'properties': properties,
},
'core-site': {
'properties': secure_cluster_core_site
},
'ranger-hdfs-plugin-properties': {
'properties':{
'ranger-hdfs-plugin-enabled':'Yes'
}
}
}
expected = [{'config-name': 'dfs.data.transfer.protection',
'config-type': 'hdfs-site',
'level': 'WARN',
'message': "Invalid property value: WRONG_VALUE. Valid values are ['authentication', 'integrity', 'privacy'].",
'type': 'configuration'}]
validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
self.assertEquals(validation_problems, expected)
# TEST CASE: Hadoop wire encryption enabled
properties = { # hdfs-site
'dfs.encrypt.data.transfer': 'true', # Wire encryption
'dfs.datanode.address': '0.0.0.0:1019',
'dfs.datanode.http.address': '0.0.0.0:1022',
}
configurations = {
'hdfs-site': {
'properties': properties,
},
'core-site': {
'properties': secure_cluster_core_site
},
'ranger-hdfs-plugin-properties': {
'properties':{
'ranger-hdfs-plugin-enabled':'Yes'
}
}
}
expected = [] # No warnings
validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, None)
self.assertEquals(validation_problems, expected)
def test_recommendYARNConfigurations(self):
configurations = {}
clusterData = {
"cpu": 4,
"containers" : 5,
"ramPerContainer": 256
}
expected = {
"yarn-env": {
"properties": {
"min_user_id": "500"
}
},
"yarn-site": {
"properties": {
"yarn.nodemanager.resource.memory-mb": "1280",
"yarn.scheduler.minimum-allocation-mb": "256",
"yarn.scheduler.maximum-allocation-mb": "1280",
"yarn.scheduler.maximum-allocation-vcores": "4",
"yarn.scheduler.minimum-allocation-vcores": "1",
"yarn.nodemanager.resource.cpu-vcores": "4"
}
}
}
self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, None, None)
self.assertEquals(configurations, expected)
  def test_recommendYARNConfigurationAttributes(self):
    """YARN recommendations must honour user-changed values and emit
    property_attributes (maximum/minimum ranges) derived from the
    NodeManager host's CPU count and memory.

    Runs the advisor three times, mutating the fixtures in between:
    1) user pinned cpu-vcores=2 via changed-configurations;
    2) cpu-vcores no longer user-set and NM host has 6 cores;
    3) NM host has 10 cores with a 0.5 physical-cpu-limit applied.
    """
    # User-supplied starting configuration; cpu-vcores deliberately "2".
    configurations = {
      "yarn-env": {
        "properties": {
          "min_user_id": "500"
        }
      },
      "yarn-site": {
        "properties": {
          "yarn.nodemanager.resource.memory-mb": "1280",
          "yarn.scheduler.minimum-allocation-mb": "256",
          "yarn.scheduler.maximum-allocation-mb": "1280",
          "yarn.nodemanager.resource.cpu-vcores": "2"
        },
      }
    }
    clusterData = {
      "cpu": 4,
      "containers" : 5,
      "ramPerContainer": 256
    }
    # Changed properties are kept as-is; the vcores recommendations follow
    # the user-set yarn.nodemanager.resource.cpu-vcores value (2).
    expected = {
      "yarn-env": {
        "properties": {
          "min_user_id": "500"
        }
      },
      "yarn-site": {
        "properties": {
          "yarn.nodemanager.resource.memory-mb": "1280",
          "yarn.scheduler.minimum-allocation-mb": "256",
          "yarn.scheduler.maximum-allocation-vcores": "2",
          "yarn.scheduler.minimum-allocation-vcores": "1",
          "yarn.scheduler.maximum-allocation-mb": "1280",
          "yarn.nodemanager.resource.cpu-vcores": "2"
        },
        "property_attributes": {
          'yarn.nodemanager.resource.memory-mb': {'maximum': '1877'},
          'yarn.nodemanager.resource.cpu-vcores': {'maximum': '2'},
          'yarn.scheduler.minimum-allocation-vcores': {'maximum': '2'},
          'yarn.scheduler.maximum-allocation-vcores': {'maximum': '2'},
          'yarn.scheduler.minimum-allocation-mb': {'maximum': '1280'},
          'yarn.scheduler.maximum-allocation-mb': {'maximum': '1280'}
        }
      }
    }
    # YARN service layout: NODEMANAGER lives on c6403 only, so that host's
    # hardware drives the attribute maxima.
    services = {
      "services": [
        {
          "href": "/api/v1/stacks/HDP/versions/2.2/services/YARN",
          "StackServices": {
            "service_name": "YARN",
            "service_version": "2.6.0.2.2",
            "stack_name": "HDP",
            "stack_version": "2.2"
          },
          "components": [
            {
              "StackServiceComponents": {
                "advertise_version": "false",
                "cardinality": "1",
                "component_category": "MASTER",
                "component_name": "APP_TIMELINE_SERVER",
                "display_name": "App Timeline Server",
                "is_client": "false",
                "is_master": "true",
                "hostnames": []
              },
              "dependencies": []
            },
            {
              "StackServiceComponents": {
                "advertise_version": "true",
                "cardinality": "1+",
                "component_category": "SLAVE",
                "component_name": "NODEMANAGER",
                "display_name": "NodeManager",
                "is_client": "false",
                "is_master": "false",
                "hostnames": [
                  "c6403.ambari.apache.org"
                ]
              },
              "dependencies": []
            },
            {
              "StackServiceComponents": {
                "advertise_version": "true",
                "cardinality": "1-2",
                "component_category": "MASTER",
                "component_name": "RESOURCEMANAGER",
                "display_name": "ResourceManager",
                "is_client": "false",
                "is_master": "true",
                "hostnames": []
              },
              "dependencies": []
            },
            {
              "StackServiceComponents": {
                "advertise_version": "true",
                "cardinality": "1+",
                "component_category": "CLIENT",
                "component_name": "YARN_CLIENT",
                "display_name": "YARN Client",
                "is_client": "true",
                "is_master": "false",
                "hostnames": []
              },
              "dependencies": []
            }
          ]
        },
      ],
      "configurations": configurations,
      # Every property listed here is treated as user-set by the advisor.
      "changed-configurations": [
        {
          "type": "yarn-site",
          "name": "yarn.nodemanager.resource.memory-mb"
        },
        {
          "type": "yarn-site",
          "name": "yarn.scheduler.minimum-allocation-mb"
        },
        {
          "type": "yarn-site",
          "name": "yarn.scheduler.maximum-allocation-mb"
        },
        {
          "type": "yarn-site",
          "name": "yarn.nodemanager.resource.cpu-vcores"
        },
        {
          "type": "yarn-env",
          "name": "min_user_id"
        },
      ]
    }
    # Three identical 1-CPU hosts; only c6403 (index 2) runs NODEMANAGER.
    hosts = {
      "items" : [
        {
          "href" : "/api/v1/hosts/c6401.ambari.apache.org",
          "Hosts" : {
            "cpu_count" : 1,
            "host_name" : "c6401.ambari.apache.org",
            "os_arch" : "x86_64",
            "os_type" : "centos6",
            "ph_cpu_count" : 1,
            "public_host_name" : "c6401.ambari.apache.org",
            "rack_info" : "/default-rack",
            "total_mem" : 1922680
          }
        },
        {
          "href" : "/api/v1/hosts/c6402.ambari.apache.org",
          "Hosts" : {
            "cpu_count" : 1,
            "host_name" : "c6402.ambari.apache.org",
            "os_arch" : "x86_64",
            "os_type" : "centos6",
            "ph_cpu_count" : 1,
            "public_host_name" : "c6402.ambari.apache.org",
            "rack_info" : "/default-rack",
            "total_mem" : 1922680
          }
        },
        {
          "href" : "/api/v1/hosts/c6403.ambari.apache.org",
          "Hosts" : {
            "cpu_count" : 1,
            "host_name" : "c6403.ambari.apache.org",
            "os_arch" : "x86_64",
            "os_type" : "centos6",
            "ph_cpu_count" : 1,
            "public_host_name" : "c6403.ambari.apache.org",
            "rack_info" : "/default-rack",
            "total_mem" : 1922680
          }
        }
      ]
    }
    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, hosts)
    self.assertEquals(configurations, expected)
    # Test host NodeManager CPU cores
    # With cpu-vcores no longer user-set and 6 physical cores on the NM
    # host, the advisor recomputes vcores (presumably min(6, cluster cpu=4)
    # for the recommendation and 2x cores = 12 for the maximum — TODO confirm).
    hosts["items"][2]["Hosts"]["cpu_count"] = 6
    services["changed-configurations"].remove({
      "type": "yarn-site",
      "name": "yarn.nodemanager.resource.cpu-vcores"
    })
    configurations["yarn-site"]["properties"].pop("yarn.nodemanager.resource.cpu-vcores", None)
    expected["yarn-site"]["properties"]["yarn.nodemanager.resource.cpu-vcores"] = '4'
    expected["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-vcores"] = '1'
    expected["yarn-site"]["properties"]["yarn.scheduler.maximum-allocation-vcores"] = '4'
    expected["yarn-site"]["property_attributes"]["yarn.nodemanager.resource.cpu-vcores"]["maximum"] = '12'
    expected["yarn-site"]["property_attributes"]["yarn.scheduler.minimum-allocation-vcores"]["maximum"] = '4'
    expected["yarn-site"]["property_attributes"]["yarn.scheduler.maximum-allocation-vcores"]["maximum"] = '4'
    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, hosts)
    self.assertEquals(configurations, expected)
    # Test host NodeManager CPU cores and 'yarn.nodemanager.resource.percentage-physical-cpu-limit'
    # 10 cores * 0.5 limit => 5 usable vcores; the attribute maximum stays
    # based on all 2x10=20 cores.
    hosts["items"][2]["Hosts"]["cpu_count"] = 10
    configurations["yarn-site"]["properties"]["yarn.nodemanager.resource.percentage-physical-cpu-limit"] = '0.5'
    services["changed-configurations"].append({
      "type": "yarn-site",
      "name": "yarn.nodemanager.resource.percentage-physical-cpu-limit"
    })
    expected["yarn-site"]["properties"]["yarn.nodemanager.resource.cpu-vcores"] = '5'
    expected["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-vcores"] = '1'
    expected["yarn-site"]["properties"]["yarn.scheduler.maximum-allocation-vcores"] = '5'
    expected["yarn-site"]["properties"]["yarn.nodemanager.resource.percentage-physical-cpu-limit"] = '0.5'
    expected["yarn-site"]["property_attributes"]["yarn.nodemanager.resource.cpu-vcores"]["maximum"] = '20'
    expected["yarn-site"]["property_attributes"]["yarn.scheduler.minimum-allocation-vcores"]["maximum"] = '5'
    expected["yarn-site"]["property_attributes"]["yarn.scheduler.maximum-allocation-vcores"]["maximum"] = '5'
    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, hosts)
    self.assertEquals(configurations, expected)
  def test_recommendHiveConfigurationAttributes(self):
    """Hive recommendations: defaults derived from YARN container sizing
    and capacity-scheduler queues, then a second pass where the user has
    disabled CBO and switched authorization to SQL-standard auth.
    """
    self.maxDiff = None
    configurations = {
      "yarn-site": {
        "properties": {
          "yarn.scheduler.minimum-allocation-mb": "256",
          "yarn.scheduler.maximum-allocation-mb": "8192",
        },
      },
      # Two queues; they should surface in hive.server2.tez.default.queues.
      "capacity-scheduler": {
        "properties": {
          "yarn.scheduler.capacity.root.queues": "queue1,queue2"
        }
      }
    }
    clusterData = {
      "cpu": 4,
      "mapMemory": 3000,
      "amMemory": 2000,
      "reduceMemory": 2056,
      "containers": 3,
      "ramPerContainer": 256
    }
    expected = {
      'capacity-scheduler': {
        'properties': {
          'yarn.scheduler.capacity.root.queues': 'queue1,queue2'
        }
      },
      'yarn-site': {
        'properties': {
          'yarn.scheduler.minimum-allocation-mb': '256',
          'yarn.scheduler.maximum-allocation-mb': '8192'
        }
      },
      'hive-env': {
        'properties': {
          'cost_based_optimizer': 'On',
          'hive_exec_orc_storage_strategy': 'SPEED',
          'hive_security_authorization': 'None',
          'hive_timeline_logging_enabled': 'true',
          'hive_txn_acid': 'off'
        }
      },
      'hive-site': {
        'properties': {
          'hive.auto.convert.join.noconditionaltask.size': '268435456',
          'hive.cbo.enable': 'true',
          'hive.compactor.initiator.on': 'false',
          'hive.compactor.worker.threads': '0',
          # NOTE(review): the key below carries a trailing space — it must
          # match whatever key the stack advisor emits; verify against the
          # advisor before "fixing" it here.
          'hive.compute.query.using.stats ': 'true',
          'hive.enforce.bucketing': 'false',
          'hive.exec.dynamic.partition.mode': 'strict',
          'hive.exec.failure.hooks': 'org.apache.hadoop.hive.ql.hooks.ATSHook',
          'hive.exec.orc.compression.strategy': 'SPEED',
          'hive.exec.orc.default.compress': 'ZLIB',
          'hive.exec.orc.default.stripe.size': '67108864',
          'hive.exec.orc.encoding.strategy': 'SPEED',
          'hive.exec.post.hooks': 'org.apache.hadoop.hive.ql.hooks.ATSHook',
          'hive.exec.pre.hooks': 'org.apache.hadoop.hive.ql.hooks.ATSHook',
          'hive.exec.reducers.bytes.per.reducer': '67108864',
          'hive.execution.engine': 'mr',
          'hive.optimize.index.filter': 'true',
          'hive.optimize.sort.dynamic.partition': 'false',
          'hive.prewarm.enabled': 'false',
          'hive.prewarm.numcontainers': '3',
          'hive.security.authorization.enabled': 'false',
          'hive.server2.use.SSL': 'false',
          'hive.stats.fetch.column.stats': 'true',
          'hive.stats.fetch.partition.stats': 'true',
          'hive.support.concurrency': 'false',
          'hive.tez.auto.reducer.parallelism': 'true',
          'hive.tez.container.size': '768',
          'hive.tez.dynamic.partition.pruning': 'true',
          'hive.tez.java.opts': '-server -Xmx615m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps',
          'hive.txn.manager': 'org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager',
          'hive.vectorized.execution.enabled': 'true',
          'hive.vectorized.execution.reduce.enabled': 'false'
        },
        # With authorization 'None', all authentication-specific properties
        # are flagged for deletion.
        'property_attributes': {
          'hive.auto.convert.join.noconditionaltask.size': {'maximum': '805306368'},
          'hive.server2.authentication.pam.services': {'delete': 'true'},
          'hive.server2.custom.authentication.class': {'delete': 'true'},
          'hive.server2.authentication.ldap.baseDN': {'delete': 'true'},
          'hive.server2.authentication.kerberos.principal': {'delete': 'true'},
          'hive.server2.authentication.kerberos.keytab': {'delete': 'true'},
          'hive.server2.authentication.ldap.url': {'delete': 'true'}
        }
      },
      'hiveserver2-site': {
        'properties': {
          'hive.server2.enable.doAs': 'true',
          'hive.server2.tez.default.queues': "queue1,queue2",
          'hive.server2.tez.initialize.default.sessions': 'false',
          'hive.server2.tez.sessions.per.default.queue': '1',
          'tez.session.am.dag.submit.timeout.secs': '600'
        },
        'property_attributes': {
          'hive.server2.tez.default.queues': {
            'entries': [{'value': 'queue1', 'label': 'queue1 queue'}, {'value': 'queue2', 'label': 'queue2 queue'}]
          }
        }
      },
    }
    services = {
      "services": [
        {
          "href": "/api/v1/stacks/HDP/versions/2.2/services/YARN",
          "StackServices": {
            "service_name": "YARN",
            "service_version": "2.6.0.2.2",
            "stack_name": "HDP",
            "stack_version": "2.2"
          },
          "components": [
            {
              "StackServiceComponents": {
                "advertise_version": "false",
                "cardinality": "1",
                "component_category": "MASTER",
                "component_name": "APP_TIMELINE_SERVER",
                "display_name": "App Timeline Server",
                "is_client": "false",
                "is_master": "true",
                "hostnames": []
              },
              "dependencies": []
            },
            {
              "StackServiceComponents": {
                "advertise_version": "true",
                "cardinality": "1+",
                "component_category": "SLAVE",
                "component_name": "NODEMANAGER",
                "display_name": "NodeManager",
                "is_client": "false",
                "is_master": "false",
                "hostnames": [
                  "c6403.ambari.apache.org"
                ]
              },
              "dependencies": []
            },
            {
              "StackServiceComponents": {
                "advertise_version": "true",
                "cardinality": "1-2",
                "component_category": "MASTER",
                "component_name": "RESOURCEMANAGER",
                "display_name": "ResourceManager",
                "is_client": "false",
                "is_master": "true",
                "hostnames": []
              },
              "dependencies": []
            },
            {
              "StackServiceComponents": {
                "advertise_version": "true",
                "cardinality": "1+",
                "component_category": "CLIENT",
                "component_name": "YARN_CLIENT",
                "display_name": "YARN Client",
                "is_client": "true",
                "is_master": "false",
                "hostnames": []
              },
              "dependencies": []
            }
          ]
        },
      ],
      "configurations": configurations,
      "changed-configurations": [ ]
    }
    hosts = {
      "items" : [
        {
          "href" : "/api/v1/hosts/c6401.ambari.apache.org",
          "Hosts" : {
            "cpu_count" : 1,
            "host_name" : "c6401.ambari.apache.org",
            "os_arch" : "x86_64",
            "os_type" : "centos6",
            "ph_cpu_count" : 1,
            "public_host_name" : "c6401.ambari.apache.org",
            "rack_info" : "/default-rack",
            "total_mem" : 1922680
          }
        },
        {
          "href" : "/api/v1/hosts/c6402.ambari.apache.org",
          "Hosts" : {
            "cpu_count" : 1,
            "host_name" : "c6402.ambari.apache.org",
            "os_arch" : "x86_64",
            "os_type" : "centos6",
            "ph_cpu_count" : 1,
            "public_host_name" : "c6402.ambari.apache.org",
            "rack_info" : "/default-rack",
            "total_mem" : 1922680
          }
        },
        {
          "href" : "/api/v1/hosts/c6403.ambari.apache.org",
          "Hosts" : {
            "cpu_count" : 1,
            "host_name" : "c6403.ambari.apache.org",
            "os_arch" : "x86_64",
            "os_type" : "centos6",
            "ph_cpu_count" : 1,
            "public_host_name" : "c6403.ambari.apache.org",
            "rack_info" : "/default-rack",
            "total_mem" : 1922680
          }
        }
      ]
    }
    self.stackAdvisor.recommendHIVEConfigurations(configurations, clusterData, services, hosts)
    self.assertEquals(configurations, expected)
    #test recommendations
    # Second pass: feed the previous output back in. NOTE(review):
    # 'configurations' and 'expected' now alias the SAME dict, so the
    # advisor must write exactly the values patched into 'expected' below
    # for the assert to be meaningful.
    configurations = expected
    configurations["hive-site"]["properties"]["hive.cbo.enable"] = "false"
    configurations["hive-env"]["properties"]["hive_security_authorization"] = "sqlstdauth"
    services["configurations"] = configurations
    services["changed-configurations"] = [{"type": "hive-site", "key": "hive.cbo.enable"},
                                          {"type": "hive-env", "key": "hive_security_authorization"}]
    # Disabling CBO should switch off stats fetching; sqlstdauth appends the
    # metastore-only authorizer.
    expected["hive-site"]["properties"]["hive.stats.fetch.partition.stats"]="false"
    expected["hive-site"]["properties"]["hive.stats.fetch.column.stats"]="false"
    expected["hive-site"]["properties"]["hive.security.metastore.authorization.manager"]=\
    ",org.apache.hadoop.hive.ql.security.authorization.MetaStoreAuthzAPIAuthorizerEmbedOnly"
    self.stackAdvisor.recommendHIVEConfigurations(configurations, clusterData, services, hosts)
    self.assertEquals(configurations, expected)
  def test_recommendMapredConfigurationAttributesWithPigService(self):
    """MapReduce2 recommendations when PIG is installed.

    Same fixture as test_recommendMapredConfigurationAttributes except the
    PIG service is present, which changes the recommended map memory/java
    opts and io.sort.mb (compare the expected values between the two tests).
    """
    configurations = {
      "mapred-site": {
        "properties": {
          "mapreduce.map.memory.mb": "1024",
          "mapreduce.reduce.memory.mb": "682",
          "yarn.app.mapreduce.am.command-opts": "-Xmx546m -Dhdp.version=${hdp.version}",
          "mapreduce.reduce.java.opts": "-Xmx546m",
          "yarn.app.mapreduce.am.resource.mb": "682",
          "mapreduce.map.java.opts": "-Xmx546m",
          "mapreduce.task.io.sort.mb": "273"
        }
      },
      "yarn-site": {
        "properties": {
          "yarn.nodemanager.resource.memory-mb": "1280",
          "yarn.scheduler.minimum-allocation-mb": "100",
          "yarn.scheduler.maximum-allocation-mb": "1280",
          "yarn.nodemanager.resource.cpu-vcores": "2"
        },
      }
    }
    clusterData = {
      "cpu": 4,
      "containers" : 5,
      "ramPerContainer": 256
    }
    # Map memory is boosted to 1500 with PIG present (vs 100 without);
    # java opts and io.sort.mb scale accordingly.
    expected = {
      "yarn-env": {
        "properties": {
          "min_user_id": "500"
        }
      },
      "mapred-site": {
        "properties": {
          "mapreduce.map.memory.mb": "1500",
          "mapreduce.reduce.memory.mb": "200",
          "yarn.app.mapreduce.am.command-opts": "-Xmx80m -Dhdp.version=${hdp.version}",
          "mapreduce.reduce.java.opts": "-Xmx160m",
          "yarn.app.mapreduce.am.resource.mb": "100",
          "mapreduce.map.java.opts": "-Xmx1200m",
          "mapreduce.task.io.sort.mb": "840"
        },
        "property_attributes": {
          'mapreduce.task.io.sort.mb': {'maximum': '2047'},
          'yarn.app.mapreduce.am.resource.mb': {'maximum': '1280',
                                                'minimum': '100'},
          'mapreduce.map.memory.mb': {'maximum': '1280',
                                      'minimum': '100'},
          'mapreduce.reduce.memory.mb': {'maximum': '1280',
                                         'minimum': '100'}
        }
      },
      "yarn-site": {
        "properties": {
          "yarn.nodemanager.resource.memory-mb": "1280",
          "yarn.scheduler.minimum-allocation-mb": "100",
          "yarn.scheduler.maximum-allocation-vcores": "1",
          "yarn.scheduler.minimum-allocation-vcores": "1",
          "yarn.scheduler.maximum-allocation-mb": "1280",
          "yarn.nodemanager.resource.cpu-vcores": "1"
        },
        "property_attributes": {
          'yarn.nodemanager.resource.memory-mb': {'maximum': '1877'},
          'yarn.nodemanager.resource.cpu-vcores': {'maximum': '2'},
          'yarn.scheduler.minimum-allocation-vcores': {'maximum': '1'},
          'yarn.scheduler.maximum-allocation-vcores': {'maximum': '1'},
          'yarn.scheduler.minimum-allocation-mb': {'maximum': '1280'},
          'yarn.scheduler.maximum-allocation-mb': {'maximum': '1280'}
        }
      }
    }
    # Service layout: PIG client, MAPREDUCE2 history server, YARN components.
    services = {
      "services": [
        {
          "href": "/api/v1/stacks/HDP/versions/2.2/services/PIG",
          "StackServices": {
            "service_name": "PIG",
            "service_version": "2.6.0.2.2",
            "stack_name": "HDP",
            "stack_version": "2.2"
          }, "components": [
          {
            "StackServiceComponents": {
              "advertise_version": "true",
              "cardinality": "0+",
              "component_category": "CLIENT",
              "component_name": "PIG",
              "display_name": "Pig",
              "is_client": "true",
              "is_master": "false",
              "hostnames": []
            },
            "dependencies": []
          }
        ]
        },
        {
          "href" : "/api/v1/stacks/HDP/versions/2.2/services/MAPREDUCE2",
          "StackServices" : {
            "service_name" : "MAPREDUCE2",
            "service_version" : "2.6.0.2.2",
            "stack_name" : "HDP",
            "stack_version" : "2.2"
          },
          "components" : [ {
            "href" : "/api/v1/stacks/HDP/versions/2.2/services/MAPREDUCE2/components/HISTORYSERVER",
            "StackServiceComponents" : {
              "advertise_version" : "true",
              "cardinality" : "1",
              "component_category" : "MASTER",
              "component_name" : "HISTORYSERVER",
              "custom_commands" : [ ],
              "display_name" : "History Server",
              "is_client" : "false",
              "is_master" : "true",
              "service_name" : "MAPREDUCE2",
              "stack_name" : "HDP",
              "stack_version" : "2.2",
              "hostnames" : [ "c6402.ambari.apache.org" ]
            },
            "auto_deploy" : {
              "enabled" : "true",
              "location" : "YARN/RESOURCEMANAGER"
            },
            "dependencies" : [ {
              "href" : "/api/v1/stacks/HDP/versions/2.2/services/MAPREDUCE2/components/HISTORYSERVER/dependencies/HDFS_CLIENT",
              "Dependencies" : {
                "component_name" : "HDFS_CLIENT",
                "dependent_component_name" : "HISTORYSERVER",
                "dependent_service_name" : "MAPREDUCE2",
                "stack_name" : "HDP",
                "stack_version" : "2.2"
              }
            } ]
          }]},
        {
          "href": "/api/v1/stacks/HDP/versions/2.2/services/YARN",
          "StackServices": {
            "service_name": "YARN",
            "service_version": "2.6.0.2.2",
            "stack_name": "HDP",
            "stack_version": "2.2"
          },
          "components": [
            {
              "StackServiceComponents": {
                "advertise_version": "false",
                "cardinality": "1",
                "component_category": "MASTER",
                "component_name": "APP_TIMELINE_SERVER",
                "display_name": "App Timeline Server",
                "is_client": "false",
                "is_master": "true",
                "hostnames": []
              },
              "dependencies": []
            },
            {
              "StackServiceComponents": {
                "advertise_version": "true",
                "cardinality": "1+",
                "component_category": "SLAVE",
                "component_name": "NODEMANAGER",
                "display_name": "NodeManager",
                "is_client": "false",
                "is_master": "false",
                "hostnames": [
                  "c6403.ambari.apache.org"
                ]
              },
              "dependencies": []
            },
            {
              "StackServiceComponents": {
                "advertise_version": "true",
                "cardinality": "1-2",
                "component_category": "MASTER",
                "component_name": "RESOURCEMANAGER",
                "display_name": "ResourceManager",
                "is_client": "false",
                "is_master": "true",
                "hostnames": []
              },
              "dependencies": []
            },
            {
              "StackServiceComponents": {
                "advertise_version": "true",
                "cardinality": "1+",
                "component_category": "CLIENT",
                "component_name": "YARN_CLIENT",
                "display_name": "YARN Client",
                "is_client": "true",
                "is_master": "false",
                "hostnames": []
              },
              "dependencies": []
            }
          ]
        },
      ],
      "configurations": configurations,
      "changed-configurations": [
        {
          "type": "yarn-site",
          "name": "yarn.scheduler.minimum-allocation-mb"
        },
      ]
    }
    hosts = {
      "items" : [
        {
          "href" : "/api/v1/hosts/c6401.ambari.apache.org",
          "Hosts" : {
            "cpu_count" : 1,
            "host_name" : "c6401.ambari.apache.org",
            "os_arch" : "x86_64",
            "os_type" : "centos6",
            "ph_cpu_count" : 1,
            "public_host_name" : "c6401.ambari.apache.org",
            "rack_info" : "/default-rack",
            "total_mem" : 1922680
          }
        },
        {
          "href" : "/api/v1/hosts/c6402.ambari.apache.org",
          "Hosts" : {
            "cpu_count" : 1,
            "host_name" : "c6402.ambari.apache.org",
            "os_arch" : "x86_64",
            "os_type" : "centos6",
            "ph_cpu_count" : 1,
            "public_host_name" : "c6402.ambari.apache.org",
            "rack_info" : "/default-rack",
            "total_mem" : 1922680
          }
        },
        {
          "href" : "/api/v1/hosts/c6403.ambari.apache.org",
          "Hosts" : {
            "cpu_count" : 1,
            "host_name" : "c6403.ambari.apache.org",
            "os_arch" : "x86_64",
            "os_type" : "centos6",
            "ph_cpu_count" : 1,
            "public_host_name" : "c6403.ambari.apache.org",
            "rack_info" : "/default-rack",
            "total_mem" : 1922680
          }
        }
      ]
    }
    self.stackAdvisor.recommendMapReduce2Configurations(configurations, clusterData, services, hosts)
    self.assertEquals(configurations, expected)
  def test_recommendMapredConfigurationAttributes(self):
    """MapReduce2 recommendations without PIG installed.

    Counterpart of test_recommendMapredConfigurationAttributesWithPigService:
    map memory stays at the scheduler minimum (100) and io.sort.mb is small.
    """
    configurations = {
      "mapred-site": {
        "properties": {
          "mapreduce.map.memory.mb": "1024",
          "mapreduce.reduce.memory.mb": "682",
          "yarn.app.mapreduce.am.command-opts": "-Xmx546m -Dhdp.version=${hdp.version}",
          "mapreduce.reduce.java.opts": "-Xmx546m",
          "yarn.app.mapreduce.am.resource.mb": "682",
          "mapreduce.map.java.opts": "-Xmx546m",
          "mapreduce.task.io.sort.mb": "273"
        }
      },
      "yarn-site": {
        "properties": {
          "yarn.nodemanager.resource.memory-mb": "1280",
          "yarn.scheduler.minimum-allocation-mb": "100",
          "yarn.scheduler.maximum-allocation-mb": "1280",
          "yarn.nodemanager.resource.cpu-vcores": "2"
        },
      }
    }
    clusterData = {
      "cpu": 4,
      "containers" : 5,
      "ramPerContainer": 256
    }
    expected = {
      "yarn-env": {
        "properties": {
          "min_user_id": "500"
        }
      },
      "mapred-site": {
        "properties": {
          "mapreduce.map.memory.mb": "100",
          "mapreduce.reduce.memory.mb": "200",
          "yarn.app.mapreduce.am.command-opts": "-Xmx80m -Dhdp.version=${hdp.version}",
          "mapreduce.reduce.java.opts": "-Xmx160m",
          "yarn.app.mapreduce.am.resource.mb": "100",
          "mapreduce.map.java.opts": "-Xmx80m",
          "mapreduce.task.io.sort.mb": "56"
        },
        # Memory-range attributes are bounded by the yarn-site scheduler
        # min/max allocations (100..1280).
        "property_attributes": {
          'mapreduce.task.io.sort.mb': {'maximum': '2047'},
          'yarn.app.mapreduce.am.resource.mb': {'maximum': '1280',
                                                'minimum': '100'},
          'mapreduce.map.memory.mb': {'maximum': '1280',
                                      'minimum': '100'},
          'mapreduce.reduce.memory.mb': {'maximum': '1280',
                                         'minimum': '100'}
        }
      },
      "yarn-site": {
        "properties": {
          "yarn.nodemanager.resource.memory-mb": "1280",
          "yarn.scheduler.minimum-allocation-mb": "100",
          "yarn.scheduler.maximum-allocation-vcores": "1",
          "yarn.scheduler.minimum-allocation-vcores": "1",
          "yarn.scheduler.maximum-allocation-mb": "1280",
          "yarn.nodemanager.resource.cpu-vcores": "1"
        },
        "property_attributes": {
          'yarn.nodemanager.resource.memory-mb': {'maximum': '1877'},
          'yarn.nodemanager.resource.cpu-vcores': {'maximum': '2'},
          'yarn.scheduler.minimum-allocation-vcores': {'maximum': '1'},
          'yarn.scheduler.maximum-allocation-vcores': {'maximum': '1'},
          'yarn.scheduler.minimum-allocation-mb': {'maximum': '1280'},
          'yarn.scheduler.maximum-allocation-mb': {'maximum': '1280'}
        }
      }
    }
    # Service layout: MAPREDUCE2 history server plus YARN components (no PIG).
    services = {
      "services": [
        {
          "href" : "/api/v1/stacks/HDP/versions/2.2/services/MAPREDUCE2",
          "StackServices" : {
            "service_name" : "MAPREDUCE2",
            "service_version" : "2.6.0.2.2",
            "stack_name" : "HDP",
            "stack_version" : "2.2"
          },
          "components" : [ {
            "href" : "/api/v1/stacks/HDP/versions/2.2/services/MAPREDUCE2/components/HISTORYSERVER",
            "StackServiceComponents" : {
              "advertise_version" : "true",
              "cardinality" : "1",
              "component_category" : "MASTER",
              "component_name" : "HISTORYSERVER",
              "custom_commands" : [ ],
              "display_name" : "History Server",
              "is_client" : "false",
              "is_master" : "true",
              "service_name" : "MAPREDUCE2",
              "stack_name" : "HDP",
              "stack_version" : "2.2",
              "hostnames" : [ "c6402.ambari.apache.org" ]
            },
            "auto_deploy" : {
              "enabled" : "true",
              "location" : "YARN/RESOURCEMANAGER"
            },
            "dependencies" : [ {
              "href" : "/api/v1/stacks/HDP/versions/2.2/services/MAPREDUCE2/components/HISTORYSERVER/dependencies/HDFS_CLIENT",
              "Dependencies" : {
                "component_name" : "HDFS_CLIENT",
                "dependent_component_name" : "HISTORYSERVER",
                "dependent_service_name" : "MAPREDUCE2",
                "stack_name" : "HDP",
                "stack_version" : "2.2"
              }
            } ]
          }]},
        {
          "href": "/api/v1/stacks/HDP/versions/2.2/services/YARN",
          "StackServices": {
            "service_name": "YARN",
            "service_version": "2.6.0.2.2",
            "stack_name": "HDP",
            "stack_version": "2.2"
          },
          "components": [
            {
              "StackServiceComponents": {
                "advertise_version": "false",
                "cardinality": "1",
                "component_category": "MASTER",
                "component_name": "APP_TIMELINE_SERVER",
                "display_name": "App Timeline Server",
                "is_client": "false",
                "is_master": "true",
                "hostnames": []
              },
              "dependencies": []
            },
            {
              "StackServiceComponents": {
                "advertise_version": "true",
                "cardinality": "1+",
                "component_category": "SLAVE",
                "component_name": "NODEMANAGER",
                "display_name": "NodeManager",
                "is_client": "false",
                "is_master": "false",
                "hostnames": [
                  "c6403.ambari.apache.org"
                ]
              },
              "dependencies": []
            },
            {
              "StackServiceComponents": {
                "advertise_version": "true",
                "cardinality": "1-2",
                "component_category": "MASTER",
                "component_name": "RESOURCEMANAGER",
                "display_name": "ResourceManager",
                "is_client": "false",
                "is_master": "true",
                "hostnames": []
              },
              "dependencies": []
            },
            {
              "StackServiceComponents": {
                "advertise_version": "true",
                "cardinality": "1+",
                "component_category": "CLIENT",
                "component_name": "YARN_CLIENT",
                "display_name": "YARN Client",
                "is_client": "true",
                "is_master": "false",
                "hostnames": []
              },
              "dependencies": []
            }
          ]
        },
      ],
      "configurations": configurations,
      "changed-configurations": [
        {
          "type": "yarn-site",
          "name": "yarn.scheduler.minimum-allocation-mb"
        },
      ]
    }
    hosts = {
      "items" : [
        {
          "href" : "/api/v1/hosts/c6401.ambari.apache.org",
          "Hosts" : {
            "cpu_count" : 1,
            "host_name" : "c6401.ambari.apache.org",
            "os_arch" : "x86_64",
            "os_type" : "centos6",
            "ph_cpu_count" : 1,
            "public_host_name" : "c6401.ambari.apache.org",
            "rack_info" : "/default-rack",
            "total_mem" : 1922680
          }
        },
        {
          "href" : "/api/v1/hosts/c6402.ambari.apache.org",
          "Hosts" : {
            "cpu_count" : 1,
            "host_name" : "c6402.ambari.apache.org",
            "os_arch" : "x86_64",
            "os_type" : "centos6",
            "ph_cpu_count" : 1,
            "public_host_name" : "c6402.ambari.apache.org",
            "rack_info" : "/default-rack",
            "total_mem" : 1922680
          }
        },
        {
          "href" : "/api/v1/hosts/c6403.ambari.apache.org",
          "Hosts" : {
            "cpu_count" : 1,
            "host_name" : "c6403.ambari.apache.org",
            "os_arch" : "x86_64",
            "os_type" : "centos6",
            "ph_cpu_count" : 1,
            "public_host_name" : "c6403.ambari.apache.org",
            "rack_info" : "/default-rack",
            "total_mem" : 1922680
          }
        }
      ]
    }
    self.stackAdvisor.recommendMapReduce2Configurations(configurations, clusterData, services, hosts)
    self.assertEquals(configurations, expected)
  def test_recommendAmsConfigurations(self):
    """AMS recommendations for a minimal one-host deployment.

    With a single METRICS_COLLECTOR on host1, the advisor should emit the
    default 512m heap sizes and the baseline ams-hbase-site / ams-site tuning.
    """
    configurations = {}
    clusterData = {}
    # Fixture: one AMBARI_METRICS service, collector placed on host1.
    services = {
      "services": [ {
        "StackServices": {
          "service_name": "AMBARI_METRICS"
        },"components": [{
          "StackServiceComponents": {
            "component_name": "METRICS_COLLECTOR",
            "hostnames": ["host1"]
          }
        }]
      }]
    }
    hosts = {
      "items": [{
        "Hosts": {
          "host_name": "host1",
        }
      }]
    }
    # Expected recommendations for the minimal cluster above.
    expected = {
      "ams-hbase-env": {
        "properties": {
          "hbase_master_heapsize": "512m",
          "hbase_regionserver_heapsize": "512m",
        }
      },
      "ams-env": {
        "properties": {
          "metrics_collector_heapsize": "512m",
        }
      },
      "ams-hbase-site": {
        "properties": {
          "hbase.regionserver.global.memstore.lowerLimit": "0.3",
          "hbase.regionserver.global.memstore.upperLimit": "0.35",
          "hfile.block.cache.size": "0.3",
          "hbase_master_xmn_size" : "128m"
        }
      },
      "ams-site": {
        "properties": {
          "timeline.metrics.host.aggregator.ttl": "86400"
        }
      }
    }
    self.stackAdvisor.recommendAmsConfigurations(configurations, clusterData, services, hosts)
    self.assertEquals(configurations, expected)
  def test_recommendHbaseEnvConfigurations(self):
    """HBase env recommendations: a 48 GB / 6-core host should yield 8 GB
    (8192 MB) master and regionserver heaps, with hbaseRam summarized as 8."""
    servicesList = ["HBASE"]
    configurations = {}
    components = []
    # Fixture: one host, 48 GB RAM (total_mem is in KB), six mount points.
    hosts = {
      "items" : [
        {
          "Hosts" : {
            "cpu_count" : 6,
            "total_mem" : 50331648,
            "disk_info" : [
              {"mountpoint" : "/"},
              {"mountpoint" : "/dev/shm"},
              {"mountpoint" : "/vagrant"},
              {"mountpoint" : "/"},
              {"mountpoint" : "/dev/shm"},
              {"mountpoint" : "/vagrant"}
            ]
          }
        }
      ]
    }
    expected = {
      "hbase-env": {
        "properties": {
          "hbase_master_heapsize": "8192",
          "hbase_regionserver_heapsize": "8192",
        }
      }
    }
    clusterData = self.stackAdvisor.getConfigurationClusterSummary(servicesList, hosts, components, None)
    # Sanity check on the cluster summary before exercising the recommender.
    self.assertEquals(clusterData['hbaseRam'], 8)
    self.stackAdvisor.recommendHbaseEnvConfigurations(configurations, clusterData, None, None)
    self.assertEquals(configurations, expected)
  def test_recommendHbaseSiteConfigurations(self):
    """hbase-site recommendations across several sequential scenarios.

    The fixtures (`services`, `expected`, `configurations`) are mutated
    between calls, so the scenario sections below are order-dependent:
    phoenix on/off, master heapsize cap, and the kerberos/authorization
    coprocessor combinations.
    """
    servicesList = ["HBASE"]
    configurations = {}
    components = []
    # One host, 48 GB RAM (total_mem in KB), six mount points.
    hosts = {
      "items" : [
        {
          "Hosts" : {
            "cpu_count" : 6,
            "total_mem" : 50331648,
            "disk_info" : [
              {"mountpoint" : "/"},
              {"mountpoint" : "/dev/shm"},
              {"mountpoint" : "/vagrant"},
              {"mountpoint" : "/"},
              {"mountpoint" : "/dev/shm"},
              {"mountpoint" : "/vagrant"}
            ]
          }
        }
      ]
    }
    services = {
      "services" : [
      ],
      "configurations": {
        "hbase-env": {
          "properties": {
            "phoenix_sql_enabled": "true"
          }
        },
        "hbase-site": {
          "properties": {}
        }
      }
    }
    # Expected output with Phoenix enabled: indexed WAL codec plus the
    # Phoenix RPC scheduler/controller factories.
    expected = {
      "hbase-site": {
        "properties": {
          "hbase.regionserver.wal.codec": "org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec",
          "hbase.region.server.rpc.scheduler.factory.class": "org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory",
          "hbase.rpc.controllerfactory.class": "org.apache.hadoop.hbase.ipc.controller.ServerRpcControllerFactory",
          "hbase.bucketcache.size": "",
          "hbase.bucketcache.percentage.in.combinedcache": "",
          "hbase.regionserver.global.memstore.size": "0.4",
          "hbase.bucketcache.ioengine": ""
        },
        'property_attributes': {
          'hbase.coprocessor.regionserver.classes': {
            'delete': 'true'
          }
        }
      },
      "hbase-env": {
        "properties": {
          "hbase_master_heapsize": "8192",
          "hbase_regionserver_heapsize": "8192",
          "hbase_max_direct_memory_size": ""
        }
      }
    }
    clusterData = self.stackAdvisor.getConfigurationClusterSummary(servicesList, hosts, components, None)
    self.assertEquals(clusterData['hbaseRam'], 8)
    # Test when phoenix_sql_enabled = true
    self.stackAdvisor.recommendHBASEConfigurations(configurations, clusterData, services, None)
    self.assertEquals(configurations, expected)
    # Test when phoenix_sql_enabled = false
    services['configurations']['hbase-env']['properties']['phoenix_sql_enabled'] = 'false'
    expected['hbase-site']['properties']['hbase.regionserver.wal.codec'] = 'org.apache.hadoop.hbase.regionserver.wal.WALCellCodec'
    expected['hbase-site']['property_attributes'] = {'hbase.region.server.rpc.scheduler.factory.class': {'delete': 'true'}, 'hbase.rpc.controllerfactory.class': {'delete': 'true'}, 'hbase.coprocessor.regionserver.classes': {'delete': 'true'}}
    self.stackAdvisor.recommendHBASEConfigurations(configurations, clusterData, services, None)
    self.assertEquals(configurations, expected)
    # Test hbase_master_heapsize maximum
    hosts['items'][0]['Hosts']['host_name'] = 'host1'
    # NOTE(review): display_name "DataNode" below looks like a copy-paste
    # from an HDFS fixture; harmless for this test but worth confirming.
    services['services'].append({"StackServices":
                          {"service_name" : "HBASE",
                           "service_version" : "2.6.0.2.2"
                           },
                      "components":[
                        {
                          "href":"/api/v1/stacks/HDP/versions/2.2/services/HBASE/components/HBASE_MASTER",
                          "StackServiceComponents":{
                            "advertise_version":"true",
                            "cardinality":"1+",
                            "component_name":"HBASE_MASTER",
                            "custom_commands":[],
                            "display_name":"DataNode",
                            "is_client":"false",
                            "is_master":"false",
                            "service_name":"HBASE",
                            "stack_name":"HDP",
                            "stack_version":"2.2",
                            "hostnames":[
                              "host1"
                            ]
                          },
                          "dependencies":[]
                        }]})
    services['configurations']['hbase-env']['properties']['phoenix_sql_enabled'] = 'false'
    expected['hbase-site']['properties']['hbase.regionserver.wal.codec'] = 'org.apache.hadoop.hbase.regionserver.wal.WALCellCodec'
    expected['hbase-site']['property_attributes'] = {'hbase.region.server.rpc.scheduler.factory.class': {'delete': 'true'}, 'hbase.rpc.controllerfactory.class': {'delete': 'true'}, 'hbase.coprocessor.regionserver.classes': {'delete': 'true'}}
    expected['hbase-env']['property_attributes'] = {'hbase_master_heapsize': {'maximum': '49152'}}
    self.stackAdvisor.recommendHBASEConfigurations(configurations, clusterData, services, hosts)
    self.assertEquals(configurations, expected)
    # Test when hbase.security.authentication = kerberos
    services['configurations']['hbase-site']['properties']['hbase.security.authentication'] = 'kerberos'
    expected['hbase-site']['properties']['hbase.coprocessor.region.classes'] = 'org.apache.hadoop.hbase.security.token.TokenProvider'
    self.stackAdvisor.recommendHBASEConfigurations(configurations, clusterData, services, None)
    self.assertEquals(configurations, expected)
    # Test when hbase.security.authentication = simple
    services['configurations']['hbase-site']['properties']['hbase.security.authentication'] = 'simple'
    expected['hbase-site']['properties']['hbase.coprocessor.region.classes'] = ''
    self.stackAdvisor.recommendHBASEConfigurations(configurations, clusterData, services, None)
    self.assertEquals(configurations, expected)
    # Test when hbase.security.authentication = kerberos AND class already there
    configurations['hbase-site']['properties'].pop('hbase.coprocessor.region.classes', None)
    services['configurations']['hbase-site']['properties']['hbase.security.authentication'] = 'kerberos'
    services['configurations']['hbase-site']['properties']['hbase.coprocessor.region.classes'] = 'a.b.c.d'
    expected['hbase-site']['properties']['hbase.coprocessor.region.classes'] = 'a.b.c.d,org.apache.hadoop.hbase.security.token.TokenProvider'
    self.stackAdvisor.recommendHBASEConfigurations(configurations, clusterData, services, None)
    self.assertEquals(configurations, expected)
    # Test when hbase.security.authentication = kerberos AND authorization = true
    configurations['hbase-site']['properties'].pop('hbase.coprocessor.region.classes', None)
    services['configurations']['hbase-site']['properties']['hbase.security.authentication'] = 'kerberos'
    services['configurations']['hbase-site']['properties']['hbase.security.authorization'] = 'true'
    expected['hbase-site']['properties']['hbase.coprocessor.master.classes'] = "org.apache.hadoop.hbase.security.access.AccessController"
    expected['hbase-site']['properties']['hbase.coprocessor.region.classes'] = 'org.apache.hadoop.hbase.security.access.AccessController,org.apache.hadoop.hbase.security.token.TokenProvider'
    expected['hbase-site']['properties']['hbase.coprocessor.regionserver.classes'] = "org.apache.hadoop.hbase.security.access.AccessController"
    self.stackAdvisor.recommendHBASEConfigurations(configurations, clusterData, services, None)
    self.assertEquals(configurations, expected)
def test_recommendHDFSConfigurations(self):
configurations = {
'ranger-hdfs-plugin-properties':{
"properties": {"ranger-hdfs-plugin-enabled":"Yes"}
},
'hdfs-site': {
"properties": {"dfs.datanode.data.dir": "/path/1,/path/2,/path/3,/path/4"}
}
}
clusterData = {
"totalAvailableRam": 2048,
"hBaseInstalled": True,
"hbaseRam": 112,
"reservedRam": 128
}
expected = {
'hadoop-env': {
'properties': {
'namenode_heapsize': '1024',
'namenode_opt_newsize' : '128',
'namenode_opt_maxnewsize' : '128'
},
'property_attributes': {
'dtnode_heapsize': {'maximum': '2048'},
'namenode_heapsize': {'maximum': '10240'}
}
},
'hdfs-site': {
'properties': {
'dfs.datanode.max.transfer.threads': '16384',
'dfs.namenode.safemode.threshold-pct': '1.000',
'dfs.datanode.failed.volumes.tolerated': '1',
'dfs.namenode.handler.count': '25',
'dfs.datanode.data.dir': '/path/1,/path/2,/path/3,/path/4'
},
'property_attributes': {
'dfs.datanode.failed.volumes.tolerated': {'maximum': '4'}
}
},
'ranger-hdfs-plugin-properties': {
'properties': {
'ranger-hdfs-plugin-enabled': 'Yes'
}
}
}
services = {"services":
[{"StackServices":
{"service_name" : "HDFS",
"service_version" : "2.6.0.2.2"
},
"components":[
{
"href":"/api/v1/stacks/HDP/versions/2.2/services/HDFS/components/DATANODE",
"StackServiceComponents":{
"advertise_version":"true",
"cardinality":"1+",
"component_category":"SLAVE",
"component_name":"DATANODE",
"custom_commands":[
],
"display_name":"DataNode",
"is_client":"false",
"is_master":"false",
"service_name":"HDFS",
"stack_name":"HDP",
"stack_version":"2.2",
"hostnames":[
"host1"
]
},
"dependencies":[
]
},
{
"href":"/api/v1/stacks/HDP/versions/2.2/services/HDFS/components/JOURNALNODE",
"StackServiceComponents":{
"advertise_version":"true",
"cardinality":"0+",
"component_category":"SLAVE",
"component_name":"JOURNALNODE",
"custom_commands":[
],
"display_name":"JournalNode",
"is_client":"false",
"is_master":"false",
"service_name":"HDFS",
"stack_name":"HDP",
"stack_version":"2.2",
"hostnames":[
"host1"
]
},
"dependencies":[
{
"href":"/api/v1/stacks/HDP/versions/2.2/services/HDFS/components/JOURNALNODE/dependencies/HDFS_CLIENT",
"Dependencies":{
"component_name":"HDFS_CLIENT",
"dependent_component_name":"JOURNALNODE",
"dependent_service_name":"HDFS",
"stack_name":"HDP",
"stack_version":"2.2"
}
}
]
},
{
"href":"/api/v1/stacks/HDP/versions/2.2/services/HDFS/components/NAMENODE",
"StackServiceComponents":{
"advertise_version":"true",
"cardinality":"1-2",
"component_category":"MASTER",
"component_name":"NAMENODE",
"custom_commands":[
"DECOMMISSION",
"REBALANCEHDFS"
],
"display_name":"NameNode",
"is_client":"false",
"is_master":"true",
"service_name":"HDFS",
"stack_name":"HDP",
"stack_version":"2.2",
"hostnames":[
"host2"
]
},
"dependencies":[
]
},
],
}],
"configurations": configurations
}
hosts = {
"items" : [
{
"href" : "/api/v1/hosts/host1",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "host1",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "host1",
"rack_info" : "/default-rack",
"total_mem" : 2097152
}
},
{
"href" : "/api/v1/hosts/host2",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "host2",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "host2",
"rack_info" : "/default-rack",
"total_mem" : 10485760
}
},
]
}
self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
# Test 1 - namenode heapsize depends on # of datanodes
datanode_hostnames = services["services"][0]["components"][0]["StackServiceComponents"]["hostnames"] # datanode hostnames
for i in xrange(10):
hostname = "datanode" + `i`
datanode_hostnames.append(hostname)
hosts['items'].append(
{
"href" : "/api/v1/hosts/" + hostname,
"Hosts" : {
"cpu_count" : 1,
"host_name" : hostname,
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : hostname,
"rack_info" : "/default-rack",
"total_mem" : 2097152
}
}
)
self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_heapsize"], "3072")
self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_opt_maxnewsize"], "384")
self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_opt_maxnewsize"], "384")
# Test 2 - add more datanodes
for i in xrange(11,30):
hostname = "datanode" + `i`
datanode_hostnames.append(hostname)
hosts['items'].append(
{
"href" : "/api/v1/hosts/" + hostname,
"Hosts" : {
"cpu_count" : 1,
"host_name" : hostname,
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : hostname,
"rack_info" : "/default-rack",
"total_mem" : 2097152
}
}
)
# namenode_heapsize depends on number of disks used used by datanode
configurations["hdfs-site"]["properties"]["dfs.datanode.data.dir"] = "/path1,/path2,/path3,/path4"
self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_heapsize"], "9984")
self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_opt_maxnewsize"], "1248")
self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_opt_maxnewsize"], "1248")
# Test 3 - more datanodes than host can handle
for i in xrange(31, 90):
hostname = "datanode" + `i`
datanode_hostnames.append(hostname)
hosts['items'].append(
{
"href" : "/api/v1/hosts/" + hostname,
"Hosts" : {
"cpu_count" : 1,
"host_name" : hostname,
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : hostname,
"rack_info" : "/default-rack",
"total_mem" : 2097152
}
}
)
self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_heapsize"], "10112")
self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_opt_maxnewsize"], "1264")
self.assertEquals(configurations["hadoop-env"]["properties"]["namenode_opt_maxnewsize"], "1264")
# Test 4 - KMS empty test from previous call
self.assertTrue("dfs.encryption.key.provider.uri" not in configurations["hdfs-site"]["properties"])
# Test 5 - Calculated from hosts install location
services["services"].append(
{"StackServices":
{"service_name" : "RANGER_KMS",
"service_version" : "2.6.0.2.2"
},
"components":[
{
"href":"/api/v1/stacks/HDP/versions/2.2/services/RANGER_KMS/components/RANGER_KMS_SERVER",
"StackServiceComponents":{
"advertise_version":"true",
"cardinality":"1+",
"component_category":"SLAVE",
"component_name":"RANGER_KMS_SERVER",
"custom_commands":[
],
"display_name":"RANGER_KMS_SERVER",
"is_client":"false",
"is_master":"false",
"service_name":"RANGER_KMS",
"stack_name":"HDP",
"stack_version":"2.2",
"hostnames":[
"host1"
]
},
"dependencies":[
]
}
]
})
self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, hosts)
self.assertEqual("kms://http@host1:9292/kms", configurations["hdfs-site"]["properties"]["dfs.encryption.key.provider.uri"])
# Test 6 - Multiple RANGER_KMS_SERVERs
services["services"][len(services["services"])-1]["components"][0]["StackServiceComponents"]["hostnames"].append("host2")
self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, hosts)
self.assertEqual("kms://http@host1,host2:9292/kms", configurations["hdfs-site"]["properties"]["dfs.encryption.key.provider.uri"])
# Test 6 - Multiple RANGER_KMS_SERVERs and custom port
configurations["kms-env"] = {"properties": {"kms_port": "1111"}}
self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, hosts)
self.assertEqual("kms://http@host1,host2:1111/kms", configurations["hdfs-site"]["properties"]["dfs.encryption.key.provider.uri"])
# Test 7 - Override by API caller
configurations["hadoop-env"] = {"properties": {"keyserver_host": "myhost1", "keyserver_port": "2222"}}
self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, hosts)
self.assertEqual("kms://http@myhost1:2222/kms", configurations["hdfs-site"]["properties"]["dfs.encryption.key.provider.uri"])
  def test_validateHDFSConfigurationsEnv(self):
    """hadoop-env validation: values at/above the recommended defaults pass;
    values below them produce WARN items naming the offending property."""
    configurations = {}
    # 1) ok: namenode_heapsize > recommended
    recommendedDefaults = {'namenode_heapsize': '1024',
                           'namenode_opt_newsize' : '256',
                           'namenode_opt_maxnewsize' : '256'}
    properties = {'namenode_heapsize': '2048',
                  'namenode_opt_newsize' : '300',
                  'namenode_opt_maxnewsize' : '300'}
    res_expected = []
    res = self.stackAdvisor.validateHDFSConfigurationsEnv(properties, recommendedDefaults, configurations, '', '')
    self.assertEquals(res, res_expected)
    # 2) fail: namenode_heapsize, namenode_opt_maxnewsize < recommended
    properties['namenode_heapsize'] = '1022'
    properties['namenode_opt_maxnewsize'] = '255'
    res_expected = [{'config-type': 'hadoop-env',
                     'message': 'Value is less than the recommended default of 1024',
                     'type': 'configuration',
                     'config-name': 'namenode_heapsize',
                     'level': 'WARN'},
                    {'config-name': 'namenode_opt_maxnewsize',
                     'config-type': 'hadoop-env',
                     'level': 'WARN',
                     'message': 'Value is less than the recommended default of 256',
                     'type': 'configuration'}]
    res = self.stackAdvisor.validateHDFSConfigurationsEnv(properties, recommendedDefaults, configurations, '', '')
    self.assertEquals(res, res_expected)
  def test_validateYARNConfigurationsEnv(self):
    """yarn-env validation: yarn_cgroups_enabled=true is only valid when
    kerberos security is enabled; otherwise a WARN is produced."""
    configurations = {}
    # 1) ok: No yarn_cgroups_enabled
    # NOTE(review): these defaults carry hadoop-env (namenode) keys —
    # presumably irrelevant to the cgroups checks exercised here; confirm.
    recommendedDefaults = {'namenode_heapsize': '1024',
                           'namenode_opt_newsize' : '256',
                           'namenode_opt_maxnewsize' : '256'}
    properties = {}
    res_expected = []
    res = self.stackAdvisor.validateYARNEnvConfigurations(properties, recommendedDefaults, configurations, '', '')
    self.assertEquals(res, res_expected)
    # 2) ok: yarn_cgroups_enabled=false, but security enabled
    properties['yarn_cgroups_enabled'] = 'false'
    configurations = {
      "core-site": {
        "properties": {
          "hadoop.security.authentication": "kerberos",
          "hadoop.security.authorization": "true"
        }
      }
    }
    res_expected = []
    res = self.stackAdvisor.validateYARNEnvConfigurations(properties, recommendedDefaults, configurations, '', '')
    self.assertEquals(res, res_expected)
    # 3) ok: yarn_cgroups_enabled=true, but security enabled
    properties['yarn_cgroups_enabled'] = 'true'
    res_expected = []
    res = self.stackAdvisor.validateYARNEnvConfigurations(properties, recommendedDefaults, configurations, '', '')
    self.assertEquals(res, res_expected)
    # 4) fail: yarn_cgroups_enabled=true, but security disabled
    configurations['core-site']['properties']['hadoop.security.authorization'] = 'false'
    res_expected = [{'config-type': 'yarn-env',
                     'message': 'CPU Isolation should only be enabled if security is enabled',
                     'type': 'configuration',
                     'config-name': 'yarn_cgroups_enabled',
                     'level': 'WARN'}]
    res = self.stackAdvisor.validateYARNEnvConfigurations(properties, recommendedDefaults, configurations, '', '')
    self.assertEquals(res, res_expected)
  def test_validateMR2XmxOptsEnv(self):
    """mapred-site -Xmx validation: malformed -Xmx values (no size) are
    ERRORs, values below the recommended default are WARNs; properties whose
    values are >= the recommendation pass silently."""
    recommendedDefaults = {'mapreduce.map.java.opts': '-Xmx500m',
                           'mapreduce.reduce.java.opts': '-Xmx600m',
                           'mapreduce.task.io.sort.mb': '100',
                           'mapreduce.map.memory.mb': '200',
                           'mapreduce.reduce.memory.mb': '300',
                           'yarn.app.mapreduce.am.resource.mb': '400',
                           'yarn.app.mapreduce.am.command-opts': '-Xmx546m -Dhdp.version=${hdp.version}'}
    # '-Xmxm' is missing its size (format error); '-Xmx0m' and '-Xmx545m'
    # are below their respective recommended values.
    properties = {'mapreduce.map.java.opts': '-Xmxm',
                  'mapreduce.reduce.java.opts': '-Xmx0m',
                  'mapreduce.task.io.sort.mb': '110',
                  'mapreduce.map.memory.mb': '210',
                  'mapreduce.reduce.memory.mb': '310',
                  'yarn.app.mapreduce.am.resource.mb': '410',
                  'yarn.app.mapreduce.am.command-opts': '-Xmx545m -Dhdp.version=${hdp.version}'}
    res_expected = [{'config-type': 'mapred-site',
                     'message': 'Invalid value format',
                     'type': 'configuration',
                     'config-name': 'mapreduce.map.java.opts',
                     'level': 'ERROR'},
                    {'config-type': 'mapred-site',
                     'message': 'Value is less than the recommended default of -Xmx600m',
                     'type': 'configuration',
                     'config-name': 'mapreduce.reduce.java.opts',
                     'level': 'WARN'},
                    {'config-type': 'mapred-site',
                     'message': 'Value is less than the recommended default of -Xmx546m',
                     'type': 'configuration',
                     'config-name': 'yarn.app.mapreduce.am.command-opts',
                     'level': 'WARN'}]
    res = self.stackAdvisor.validateMapReduce2Configurations(properties, recommendedDefaults, {}, '', '')
    self.assertEquals(res, res_expected)
def test_validateHiveConfigurationsEnv(self):
properties = {"hive_security_authorization": "None"}
configurations = {"hive-site": {
"properties": {"hive.security.authorization.enabled": "true"}
},
"hive-env": {
"properties": {"hive_security_authorization": "None"}
}
}
res_expected = [
{
"config-type": "hive-env",
"message": "hive_security_authorization should not be None if hive.security.authorization.enabled is set",
'type': 'configuration',
"config-name": "hive_security_authorization",
"level": "ERROR"
}
]
res = self.stackAdvisor.validateHiveConfigurationsEnv(properties, {}, configurations, {}, {})
self.assertEquals(res, res_expected)
pass
  def test_recommendYarnCGroupConfigurations(self):
    """YARN cgroup recommendations on a 48 GB / 6-core host.

    With yarn_cgroups_enabled=true the LinuxContainerExecutor plus the
    cgroup mount/hierarchy properties are recommended; with it set to false
    the executor reverts to DefaultContainerExecutor and the cgroup-specific
    properties are marked for deletion via property_attributes.
    """
    servicesList = ["YARN"]
    configurations = {}
    components = []
    # One host, 48 GB RAM (total_mem in KB), six mount points.
    hosts = {
      "items" : [
        {
          "Hosts" : {
            "cpu_count" : 6,
            "total_mem" : 50331648,
            "disk_info" : [
              {"mountpoint" : "/"},
              {"mountpoint" : "/dev/shm"},
              {"mountpoint" : "/vagrant"},
              {"mountpoint" : "/"},
              {"mountpoint" : "/dev/shm"},
              {"mountpoint" : "/vagrant"}
            ],
            "public_host_name" : "c6401.ambari.apache.org",
            "host_name" : "c6401.ambari.apache.org"
          }
        }
      ]
    }
    # YARN with a single NODEMANAGER; cgroups enabled in yarn-env.
    services = {
      "services" : [ {
        "StackServices":{
          "service_name": "YARN",
        },
        "components": [
          {
            "StackServiceComponents": {
              "component_name": "NODEMANAGER",
              "hostnames": ["c6401.ambari.apache.org"]
            }
          }
        ]
      }
      ],
      "configurations": {
        "yarn-env": {
          "properties": {
            "yarn_cgroups_enabled": "true"
          }
        }
      }
    }
    expected = {
      "yarn-env": {
        "properties": {
          "min_user_id": "500"
        }
      },
      "yarn-site": {
        "properties": {
          "yarn.nodemanager.container-executor.group": "hadoop",
          "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor",
          "yarn.nodemanager.linux-container-executor.cgroups.mount-path": "/cgroup",
          "yarn.nodemanager.container-executor.cgroups.mount": "true",
          "yarn.nodemanager.resource.memory-mb": "39424",
          "yarn.scheduler.minimum-allocation-mb": "3584",
          "yarn.scheduler.maximum-allocation-vcores": "4",
          "yarn.scheduler.minimum-allocation-vcores": "1",
          "yarn.nodemanager.resource.cpu-vcores": "4",
          "yarn.nodemanager.container-executor.cgroups.hierarchy": " /yarn",
          "yarn.scheduler.maximum-allocation-mb": "39424",
          "yarn.nodemanager.container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler"
        },
        "property_attributes": {
          "yarn.scheduler.minimum-allocation-vcores": {
            "maximum": "4"
          },
          "yarn.scheduler.maximum-allocation-vcores": {
            "maximum": "4"
          },
          "yarn.nodemanager.resource.memory-mb": {
            "maximum": "49152"
          },
          "yarn.scheduler.minimum-allocation-mb": {
            "maximum": "39424"
          },
          "yarn.nodemanager.resource.cpu-vcores": {
            "maximum": "12"
          },
          "yarn.scheduler.maximum-allocation-mb": {
            "maximum": "39424"
          }
        }
      }
    }
    clusterData = self.stackAdvisor.getConfigurationClusterSummary(servicesList, hosts, components, None)
    self.assertEquals(clusterData['hbaseRam'], 8)
    # Test when yarn_cgroups_enabled = true
    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, hosts)
    self.assertEquals(configurations, expected)
    # Test when yarn_cgroups_enabled = false
    services['configurations']['yarn-env']['properties']['yarn_cgroups_enabled'] = 'false'
    # Same property values as above, but the cgroup-related entries are now
    # flagged for deletion and the executor class is the default one.
    expected = {
      "yarn-env": {
        "properties": {
          "min_user_id": "500"
        }
      },
      "yarn-site": {
        "properties": {
          "yarn.nodemanager.container-executor.group": "hadoop",
          "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor",
          "yarn.nodemanager.linux-container-executor.cgroups.mount-path": "/cgroup",
          "yarn.nodemanager.container-executor.cgroups.mount": "true",
          "yarn.nodemanager.resource.memory-mb": "39424",
          "yarn.scheduler.minimum-allocation-mb": "3584",
          "yarn.scheduler.maximum-allocation-vcores": "4",
          "yarn.scheduler.minimum-allocation-vcores": "1",
          "yarn.nodemanager.resource.cpu-vcores": "4",
          "yarn.nodemanager.container-executor.cgroups.hierarchy": " /yarn",
          "yarn.scheduler.maximum-allocation-mb": "39424",
          "yarn.nodemanager.container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler"
        },
        "property_attributes": {
          "yarn.nodemanager.container-executor.cgroups.mount": {
            "delete": "true"
          },
          "yarn.nodemanager.container-executor.cgroups.hierarchy": {
            "delete": "true"
          },
          "yarn.nodemanager.linux-container-executor.cgroups.mount-path": {
            "delete": "true"
          },
          "yarn.scheduler.minimum-allocation-vcores": {
            "maximum": "4"
          },
          "yarn.scheduler.maximum-allocation-vcores": {
            "maximum": "4"
          },
          "yarn.nodemanager.resource.memory-mb": {
            "maximum": "49152"
          },
          "yarn.scheduler.minimum-allocation-mb": {
            "maximum": "39424"
          },
          "yarn.nodemanager.resource.cpu-vcores": {
            "maximum": "12"
          },
          "yarn.scheduler.maximum-allocation-mb": {
            "maximum": "39424"
          },
          "yarn.nodemanager.container-executor.resources-handler.class": {
            "delete": "true"
          }
        }
      }
    }
    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, hosts)
    self.assertEquals(configurations, expected)
| 39.59041
| 242
| 0.521102
| 8,215
| 97,432
| 6.073402
| 0.083628
| 0.02806
| 0.003367
| 0.024051
| 0.825727
| 0.80372
| 0.772854
| 0.750566
| 0.733249
| 0.725412
| 0
| 0.029345
| 0.337559
| 97,432
| 2,460
| 243
| 39.606504
| 0.743675
| 0.02766
| 0
| 0.611257
| 0
| 0.006108
| 0.41208
| 0.193219
| 0
| 0
| 0
| 0
| 0.027487
| 0
| null | null | 0.000873
| 0.001745
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
435c7706c4d5d1cae30f082b48b5fd0f7f782f7b
| 36,279
|
py
|
Python
|
imcsdk/mometa/mgmt/MgmtIf.py
|
TetrationAnalytics/imcsdk
|
d86e47831f294dc9fa5e99b9a92abceac2502d76
|
[
"Apache-2.0"
] | null | null | null |
imcsdk/mometa/mgmt/MgmtIf.py
|
TetrationAnalytics/imcsdk
|
d86e47831f294dc9fa5e99b9a92abceac2502d76
|
[
"Apache-2.0"
] | null | null | null |
imcsdk/mometa/mgmt/MgmtIf.py
|
TetrationAnalytics/imcsdk
|
d86e47831f294dc9fa5e99b9a92abceac2502d76
|
[
"Apache-2.0"
] | 2
|
2016-05-26T02:05:46.000Z
|
2017-09-13T05:13:25.000Z
|
"""This module contains the general information for MgmtIf ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class MgmtIfConsts:
    """Allowed string values for MgmtIf managed-object properties.

    Groups: admin/oper duplex and network speed, interface type, NIC
    mode/redundancy, and VIC slot identifiers. These strings are the
    values accepted/reported by the CIMC for the mgmtIf object.
    """
    # Administrative duplex settings
    ADMIN_DUPLEX_NA = "NA"
    ADMIN_DUPLEX_AUTO = "auto"
    ADMIN_DUPLEX_FULL = "full"
    ADMIN_DUPLEX_HALF = "half"
    # Administrative network speed settings
    ADMIN_NET_SPEED_100_MBPS = "100Mbps"
    ADMIN_NET_SPEED_10_MBPS = "10Mbps"
    ADMIN_NET_SPEED_1_GBPS = "1Gbps"
    ADMIN_NET_SPEED_NA = "NA"
    ADMIN_NET_SPEED_AUTO = "auto"
    # Interface types
    IF_TYPE_PHYSICAL = "physical"
    # NIC operating modes
    NIC_MODE_CISCO_CARD = "cisco_card"
    NIC_MODE_DEDICATED = "dedicated"
    NIC_MODE_SHARED_LOM = "shared_lom"
    NIC_MODE_SHARED_LOM_10G = "shared_lom_10g"
    NIC_MODE_SHARED_LOM_EXT = "shared_lom_ext"
    NIC_MODE_SHIPPING = "shipping"
    # NIC redundancy modes
    NIC_REDUNDANCY_ACTIVE_ACTIVE = "active-active"
    NIC_REDUNDANCY_ACTIVE_STANDBY = "active-standby"
    NIC_REDUNDANCY_NONE = "none"
    # Operational duplex values (as reported)
    OPER_DUPLEX_NA = "NA"
    OPER_DUPLEX_AUTO = "auto"
    OPER_DUPLEX_FULL = "full"
    OPER_DUPLEX_HALF = "half"
    # Operational network speed values (as reported)
    OPER_NET_SPEED_100_MBPS = "100Mbps"
    OPER_NET_SPEED_10_MBPS = "10Mbps"
    OPER_NET_SPEED_1_GBPS = "1Gbps"
    OPER_NET_SPEED_NA = "NA"
    OPER_NET_SPEED_AUTO = "auto"
    # VIC slot identifiers
    VIC_SLOT_0 = "0"
    VIC_SLOT_1 = "1"
    VIC_SLOT_10 = "10"
    VIC_SLOT_11 = "11"
    VIC_SLOT_12 = "12"
    VIC_SLOT_2 = "2"
    VIC_SLOT_4 = "4"
    VIC_SLOT_5 = "5"
    VIC_SLOT_9 = "9"
    VIC_SLOT_FLEX_LOM = "flex-lom"
    VIC_SLOT_MLOM = "mlom"
    VIC_SLOT_RISER1 = "riser1"
    VIC_SLOT_RISER2 = "riser2"
    IF_TYPE_VIRTUAL = "virtual"
class MgmtIf(ManagedObject):
    """Managed object for a CIMC management network interface (``mgmtIf``).

    Auto-generated class: all behavior lives in the ``ManagedObject`` base;
    this class only supplies per-platform metadata ("classic" rack servers
    vs. "modular" chassis) describing the object's properties, access
    rights, parents/children, and supported verbs.
    """
    consts = MgmtIfConsts()
    # mgmtIf has no naming properties; its RN is the fixed "if-1".
    naming_props = set([])

    # Per-platform object metadata. Field meanings follow
    # imccoremeta.MoMeta (name, xml tag, rn, min version, category,
    # access mask, ..., parent classes, child classes, supported verbs).
    # NOTE(review): the hex masks (0x1ffffffff / 0x3fffffffffff) are the
    # OR of all writable-property masks below — generated, do not edit.
    mo_meta = {
        "classic": MoMeta("MgmtIf", "mgmtIf", "if-1", VersionMeta.Version151f, "InputOutput", 0x1ffffffff, [], ["admin", "read-only", "user"], ['mgmtController'], ['faultInst', 'ipBlocking', 'ipFiltering'], ["Get", "Set"]),
        "modular": MoMeta("MgmtIf", "mgmtIf", "if-1", VersionMeta.Version2013e, "InputOutput", 0x3fffffffffff, [], ["admin", "read-only", "user"], ['equipmentChassis', 'mgmtController'], ['ipBlocking', 'ipFiltering'], ["Get", "Set"])
    }

    # Per-platform property metadata, keyed by the Pythonic property name.
    # Each MoPropertyMeta carries (python name, xml name, type, min
    # version, access, dirty-mask bit, min length, max length, regex,
    # allowed values, allowed ranges). The long raw-string patterns are
    # generated IPv4 / IPv6 / hostname validators.
    prop_meta = {
        "classic": {
            "admin_duplex": MoPropertyMeta("admin_duplex", "adminDuplex", "string", VersionMeta.Version209c, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["NA", "auto", "full", "half"], []),
            "admin_net_speed": MoPropertyMeta("admin_net_speed", "adminNetSpeed", "string", VersionMeta.Version209c, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, ["100Mbps", "10Mbps", "1Gbps", "NA", "auto"], []),
            "auto_neg": MoPropertyMeta("auto_neg", "autoNeg", "string", VersionMeta.Version209c, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, ["Disabled", "Enabled", "disabled", "enabled"], []),
            "ddns_domain": MoPropertyMeta("ddns_domain", "ddnsDomain", "string", VersionMeta.Version201a, MoPropertyMeta.READ_WRITE, 0x10, 0, 255, None, [], []),
            "ddns_enable": MoPropertyMeta("ddns_enable", "ddnsEnable", "string", VersionMeta.Version201a, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, ["No", "Yes", "false", "no", "true", "yes"], []),
            "ddns_refresh_interval": MoPropertyMeta("ddns_refresh_interval", "ddnsRefreshInterval", "uint", VersionMeta.Version401a, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, [], ["0-8736"]),
            "dhcp_enable": MoPropertyMeta("dhcp_enable", "dhcpEnable", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x80, None, None, None, ["No", "Yes", "false", "no", "true", "yes"], []),
            "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x100, 0, 255, None, [], []),
            "dns_alternate": MoPropertyMeta("dns_alternate", "dnsAlternate", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x200, 0, 255, r"""(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)""", [], []),
            "dns_preferred": MoPropertyMeta("dns_preferred", "dnsPreferred", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x400, 0, 255, r"""(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)""", [], []),
            "dns_using_dhcp": MoPropertyMeta("dns_using_dhcp", "dnsUsingDhcp", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x800, None, None, None, ["No", "Yes", "false", "no", "true", "yes"], []),
            "ext_gw": MoPropertyMeta("ext_gw", "extGw", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x1000, 0, 255, r"""(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)""", [], []),
            "ext_ip": MoPropertyMeta("ext_ip", "extIp", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x2000, 0, 255, r"""(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)""", [], []),
            "ext_mask": MoPropertyMeta("ext_mask", "extMask", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x4000, 0, 255, r"""(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)""", [], []),
            "hostname": MoPropertyMeta("hostname", "hostname", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x8000, 0, 63, r"""(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])""", [], []),
            "nic_mode": MoPropertyMeta("nic_mode", "nicMode", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x10000, None, None, None, ["cisco_card", "dedicated", "shared_lom", "shared_lom_10g", "shared_lom_ext", "shipping"], []),
            "nic_redundancy": MoPropertyMeta("nic_redundancy", "nicRedundancy", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x20000, None, None, None, ["active-active", "active-standby", "none"], []),
            "port_profile": MoPropertyMeta("port_profile", "portProfile", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x40000, None, None, r"""(([a-zA-Z0-9_]{1})|([a-zA-Z0-9_]{1}[a-zA-Z0-9_\-]{0,79}))""", [], []),
            "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x80000, 0, 255, None, [], []),
            "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x100000, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
            "v6dhcp_enable": MoPropertyMeta("v6dhcp_enable", "v6dhcpEnable", "string", VersionMeta.Version201a, MoPropertyMeta.READ_WRITE, 0x200000, None, None, None, ["No", "Yes", "false", "no", "true", "yes"], []),
            "v6dns_alternate": MoPropertyMeta("v6dns_alternate", "v6dnsAlternate", "string", VersionMeta.Version201a, MoPropertyMeta.READ_WRITE, 0x400000, 0, 255, r"""(https?://)?([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{0,4}|:[0-9A-Fa-f]{1,4})?|(:[0-9A-Fa-f]{1,4}){0,2})|(:[0-9A-Fa-f]{1,4}){0,3})|(:[0-9A-Fa-f]{1,4}){0,4})|:(:[0-9A-Fa-f]{1,4}){0,5})((:[0-9A-Fa-f]{1,4}){2}|:(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])(\.(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])){3})|(([0-9A-Fa-f]{1,4}:){1,6}|:):[0-9A-Fa-f]{0,4}|([0-9A-Fa-f]{1,4}:){7}:""", [], []),
            "v6dns_preferred": MoPropertyMeta("v6dns_preferred", "v6dnsPreferred", "string", VersionMeta.Version201a, MoPropertyMeta.READ_WRITE, 0x800000, 0, 255, r"""(https?://)?([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{0,4}|:[0-9A-Fa-f]{1,4})?|(:[0-9A-Fa-f]{1,4}){0,2})|(:[0-9A-Fa-f]{1,4}){0,3})|(:[0-9A-Fa-f]{1,4}){0,4})|:(:[0-9A-Fa-f]{1,4}){0,5})((:[0-9A-Fa-f]{1,4}){2}|:(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])(\.(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])){3})|(([0-9A-Fa-f]{1,4}:){1,6}|:):[0-9A-Fa-f]{0,4}|([0-9A-Fa-f]{1,4}:){7}:""", [], []),
            "v6dns_using_dhcp": MoPropertyMeta("v6dns_using_dhcp", "v6dnsUsingDhcp", "string", VersionMeta.Version201a, MoPropertyMeta.READ_WRITE, 0x1000000, None, None, None, ["No", "Yes", "false", "no", "true", "yes"], []),
            "v6ext_enabled": MoPropertyMeta("v6ext_enabled", "v6extEnabled", "string", VersionMeta.Version201a, MoPropertyMeta.READ_WRITE, 0x2000000, None, None, None, ["No", "Yes", "false", "no", "true", "yes"], []),
            "v6ext_gw": MoPropertyMeta("v6ext_gw", "v6extGw", "string", VersionMeta.Version201a, MoPropertyMeta.READ_WRITE, 0x4000000, 0, 255, r"""(https?://)?([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{0,4}|:[0-9A-Fa-f]{1,4})?|(:[0-9A-Fa-f]{1,4}){0,2})|(:[0-9A-Fa-f]{1,4}){0,3})|(:[0-9A-Fa-f]{1,4}){0,4})|:(:[0-9A-Fa-f]{1,4}){0,5})((:[0-9A-Fa-f]{1,4}){2}|:(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])(\.(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])){3})|(([0-9A-Fa-f]{1,4}:){1,6}|:):[0-9A-Fa-f]{0,4}|([0-9A-Fa-f]{1,4}:){7}:""", [], []),
            "v6ext_ip": MoPropertyMeta("v6ext_ip", "v6extIp", "string", VersionMeta.Version201a, MoPropertyMeta.READ_WRITE, 0x8000000, 0, 255, r"""(https?://)?([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{0,4}|:[0-9A-Fa-f]{1,4})?|(:[0-9A-Fa-f]{1,4}){0,2})|(:[0-9A-Fa-f]{1,4}){0,3})|(:[0-9A-Fa-f]{1,4}){0,4})|:(:[0-9A-Fa-f]{1,4}){0,5})((:[0-9A-Fa-f]{1,4}){2}|:(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])(\.(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])){3})|(([0-9A-Fa-f]{1,4}:){1,6}|:):[0-9A-Fa-f]{0,4}|([0-9A-Fa-f]{1,4}:){7}:""", [], []),
            "v6prefix": MoPropertyMeta("v6prefix", "v6prefix", "uint", VersionMeta.Version201a, MoPropertyMeta.READ_WRITE, 0x10000000, None, None, None, [], ["1-128"]),
            "vic_slot": MoPropertyMeta("vic_slot", "vicSlot", "string", VersionMeta.Version202c, MoPropertyMeta.READ_WRITE, 0x20000000, None, None, None, ["0", "1", "10", "11", "12", "2", "4", "5", "9", "flex-lom", "mlom", "riser1", "riser2"], []),
            "vlan_enable": MoPropertyMeta("vlan_enable", "vlanEnable", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x40000000, None, None, None, ["No", "Yes", "false", "no", "true", "yes"], []),
            "vlan_id": MoPropertyMeta("vlan_id", "vlanId", "uint", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x80000000, None, None, None, [], ["1-4094"]),
            "vlan_priority": MoPropertyMeta("vlan_priority", "vlanPriority", "uint", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x100000000, None, None, None, [], ["0-7"]),
            # Properties below are internal/read-only (dirty-mask bit is None).
            "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version151f, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
            "description": MoPropertyMeta("description", "description", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
            "ext_enabled": MoPropertyMeta("ext_enabled", "extEnabled", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
            "id": MoPropertyMeta("id", "id", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
            "if_type": MoPropertyMeta("if_type", "ifType", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, None, None, None, ["physical"], []),
            "mac": MoPropertyMeta("mac", "mac", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
            "oper_duplex": MoPropertyMeta("oper_duplex", "operDuplex", "string", VersionMeta.Version209c, MoPropertyMeta.READ_ONLY, None, None, None, None, ["NA", "auto", "full", "half"], []),
            "oper_net_speed": MoPropertyMeta("oper_net_speed", "operNetSpeed", "string", VersionMeta.Version209c, MoPropertyMeta.READ_ONLY, None, None, None, None, ["100Mbps", "10Mbps", "1Gbps", "NA", "auto"], []),
            "subject": MoPropertyMeta("subject", "subject", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
            "v6_slaac_ip": MoPropertyMeta("v6_slaac_ip", "v6SlaacIp", "string", VersionMeta.Version202c, MoPropertyMeta.READ_ONLY, None, 0, 255, r"""(https?://)?([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{0,4}|:[0-9A-Fa-f]{1,4})?|(:[0-9A-Fa-f]{1,4}){0,2})|(:[0-9A-Fa-f]{1,4}){0,3})|(:[0-9A-Fa-f]{1,4}){0,4})|:(:[0-9A-Fa-f]{1,4}){0,5})((:[0-9A-Fa-f]{1,4}){2}|:(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])(\.(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])){3})|(([0-9A-Fa-f]{1,4}:){1,6}|:):[0-9A-Fa-f]{0,4}|([0-9A-Fa-f]{1,4}:){7}:""", [], []),
            "v6link_local": MoPropertyMeta("v6link_local", "v6linkLocal", "string", VersionMeta.Version201a, MoPropertyMeta.READ_ONLY, None, 0, 255, r"""(https?://)?([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{0,4}|:[0-9A-Fa-f]{1,4})?|(:[0-9A-Fa-f]{1,4}){0,2})|(:[0-9A-Fa-f]{1,4}){0,3})|(:[0-9A-Fa-f]{1,4}){0,4})|:(:[0-9A-Fa-f]{1,4}){0,5})((:[0-9A-Fa-f]{1,4}){2}|:(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])(\.(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])){3})|(([0-9A-Fa-f]{1,4}:){1,6}|:):[0-9A-Fa-f]{0,4}|([0-9A-Fa-f]{1,4}:){7}:""", [], []),
        },
        # Modular platform adds single-IP mode, per-controller BMC/CMC
        # addresses and a virtual hostname on top of the classic set.
        "modular": {
            "admin_duplex": MoPropertyMeta("admin_duplex", "adminDuplex", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["NA", "auto", "full", "half"], []),
            "admin_net_speed": MoPropertyMeta("admin_net_speed", "adminNetSpeed", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, ["100Mbps", "10Mbps", "1Gbps", "NA", "auto"], []),
            "auto_neg": MoPropertyMeta("auto_neg", "autoNeg", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, ["Disabled", "Enabled", "disabled", "enabled"], []),
            "ddns_domain": MoPropertyMeta("ddns_domain", "ddnsDomain", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, 0, 255, None, [], []),
            "ddns_enable": MoPropertyMeta("ddns_enable", "ddnsEnable", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, ["No", "Yes", "no", "yes"], []),
            "ddns_refresh_interval": MoPropertyMeta("ddns_refresh_interval", "ddnsRefreshInterval", "uint", VersionMeta.Version404b, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, [], ["0-8736"]),
            "dhcp_enable": MoPropertyMeta("dhcp_enable", "dhcpEnable", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x80, None, None, None, ["No", "Yes", "no", "yes"], []),
            "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x100, 0, 255, None, [], []),
            "dns_alternate": MoPropertyMeta("dns_alternate", "dnsAlternate", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x200, 0, 255, r"""(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)""", [], []),
            "dns_preferred": MoPropertyMeta("dns_preferred", "dnsPreferred", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x400, 0, 255, r"""(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)""", [], []),
            "dns_using_dhcp": MoPropertyMeta("dns_using_dhcp", "dnsUsingDhcp", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x800, None, None, None, ["No", "Yes", "no", "yes"], []),
            "ext_gw": MoPropertyMeta("ext_gw", "extGw", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x1000, 0, 255, r"""(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)""", [], []),
            "ext_ip": MoPropertyMeta("ext_ip", "extIp", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2000, 0, 255, r"""(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)""", [], []),
            "ext_mask": MoPropertyMeta("ext_mask", "extMask", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4000, 0, 255, r"""(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)""", [], []),
            "hostname": MoPropertyMeta("hostname", "hostname", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8000, 0, 63, r"""(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])""", [], []),
            "nic_mode": MoPropertyMeta("nic_mode", "nicMode", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10000, None, None, None, ["cisco_card", "dedicated", "shared_lom", "shared_lom_10g", "shared_lom_ext", "shipping"], []),
            "nic_redundancy": MoPropertyMeta("nic_redundancy", "nicRedundancy", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x20000, None, None, None, ["active-active", "active-standby", "none"], []),
            "port_profile": MoPropertyMeta("port_profile", "portProfile", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x40000, None, None, r"""(([a-zA-Z0-9_]{1})|([a-zA-Z0-9_]{1}[a-zA-Z0-9_\-]{0,79}))""", [], []),
            "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x80000, 0, 255, None, [], []),
            "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x100000, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
            "v6dhcp_enable": MoPropertyMeta("v6dhcp_enable", "v6dhcpEnable", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x200000, None, None, None, ["No", "Yes", "no", "yes"], []),
            "v6dns_alternate": MoPropertyMeta("v6dns_alternate", "v6dnsAlternate", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x400000, 0, 255, r"""(https?://)?([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{0,4}|:[0-9A-Fa-f]{1,4})?|(:[0-9A-Fa-f]{1,4}){0,2})|(:[0-9A-Fa-f]{1,4}){0,3})|(:[0-9A-Fa-f]{1,4}){0,4})|:(:[0-9A-Fa-f]{1,4}){0,5})((:[0-9A-Fa-f]{1,4}){2}|:(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])(\.(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])){3})|(([0-9A-Fa-f]{1,4}:){1,6}|:):[0-9A-Fa-f]{0,4}|([0-9A-Fa-f]{1,4}:){7}:""", [], []),
            "v6dns_preferred": MoPropertyMeta("v6dns_preferred", "v6dnsPreferred", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x800000, 0, 255, r"""(https?://)?([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{0,4}|:[0-9A-Fa-f]{1,4})?|(:[0-9A-Fa-f]{1,4}){0,2})|(:[0-9A-Fa-f]{1,4}){0,3})|(:[0-9A-Fa-f]{1,4}){0,4})|:(:[0-9A-Fa-f]{1,4}){0,5})((:[0-9A-Fa-f]{1,4}){2}|:(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])(\.(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])){3})|(([0-9A-Fa-f]{1,4}:){1,6}|:):[0-9A-Fa-f]{0,4}|([0-9A-Fa-f]{1,4}:){7}:""", [], []),
            "v6dns_using_dhcp": MoPropertyMeta("v6dns_using_dhcp", "v6dnsUsingDhcp", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x1000000, None, None, None, ["No", "Yes", "no", "yes"], []),
            "v6ext_enabled": MoPropertyMeta("v6ext_enabled", "v6extEnabled", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2000000, None, None, None, ["No", "Yes", "no", "yes"], []),
            "v6ext_gw": MoPropertyMeta("v6ext_gw", "v6extGw", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4000000, 0, 255, r"""(https?://)?([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{0,4}|:[0-9A-Fa-f]{1,4})?|(:[0-9A-Fa-f]{1,4}){0,2})|(:[0-9A-Fa-f]{1,4}){0,3})|(:[0-9A-Fa-f]{1,4}){0,4})|:(:[0-9A-Fa-f]{1,4}){0,5})((:[0-9A-Fa-f]{1,4}){2}|:(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])(\.(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])){3})|(([0-9A-Fa-f]{1,4}:){1,6}|:):[0-9A-Fa-f]{0,4}|([0-9A-Fa-f]{1,4}:){7}:""", [], []),
            "v6ext_ip": MoPropertyMeta("v6ext_ip", "v6extIp", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8000000, 0, 255, r"""(https?://)?([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{0,4}|:[0-9A-Fa-f]{1,4})?|(:[0-9A-Fa-f]{1,4}){0,2})|(:[0-9A-Fa-f]{1,4}){0,3})|(:[0-9A-Fa-f]{1,4}){0,4})|:(:[0-9A-Fa-f]{1,4}){0,5})((:[0-9A-Fa-f]{1,4}){2}|:(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])(\.(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])){3})|(([0-9A-Fa-f]{1,4}:){1,6}|:):[0-9A-Fa-f]{0,4}|([0-9A-Fa-f]{1,4}:){7}:""", [], []),
            "v6prefix": MoPropertyMeta("v6prefix", "v6prefix", "uint", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10000000, None, None, None, [], ["1-128"]),
            "vic_slot": MoPropertyMeta("vic_slot", "vicSlot", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x20000000, None, None, None, ["0", "1", "10", "2", "4", "5", "9", "flex-lom", "riser1", "riser2"], []),
            "vlan_enable": MoPropertyMeta("vlan_enable", "vlanEnable", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x40000000, None, None, None, ["No", "Yes", "no", "yes"], []),
            "vlan_id": MoPropertyMeta("vlan_id", "vlanId", "uint", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x80000000, None, None, None, [], ["1-4094"]),
            "vlan_priority": MoPropertyMeta("vlan_priority", "vlanPriority", "uint", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x100000000, None, None, None, [], ["0-7"]),
            "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
            "description": MoPropertyMeta("description", "description", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
            "ext_enabled": MoPropertyMeta("ext_enabled", "extEnabled", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
            "id": MoPropertyMeta("id", "id", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
            "if_type": MoPropertyMeta("if_type", "ifType", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["physical", "virtual"], []),
            "mac": MoPropertyMeta("mac", "mac", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
            "oper_duplex": MoPropertyMeta("oper_duplex", "operDuplex", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["NA", "auto", "full", "half"], []),
            "oper_net_speed": MoPropertyMeta("oper_net_speed", "operNetSpeed", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["100Mbps", "10Mbps", "1Gbps", "NA", "auto"], []),
            "single_ip_enable": MoPropertyMeta("single_ip_enable", "singleIPEnable", "string", VersionMeta.Version404b, MoPropertyMeta.READ_WRITE, 0x200000000, None, None, None, ["No", "Yes", "no", "yes"], []),
            "starting_port": MoPropertyMeta("starting_port", "startingPort", "uint", VersionMeta.Version404b, MoPropertyMeta.READ_WRITE, 0x400000000, None, None, None, [], ["9000-65529"]),
            "subject": MoPropertyMeta("subject", "subject", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
            "v4_ip_addr": MoPropertyMeta("v4_ip_addr", "v4IPAddr", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x800000000, 0, 255, r"""(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)""", [], []),
            "v4_ip_addr_bmc1": MoPropertyMeta("v4_ip_addr_bmc1", "v4IPAddrBmc1", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x1000000000, 0, 255, r"""(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)""", [], []),
            "v4_ip_addr_bmc2": MoPropertyMeta("v4_ip_addr_bmc2", "v4IPAddrBmc2", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2000000000, 0, 255, r"""(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)""", [], []),
            "v4_ip_addr_cmc1": MoPropertyMeta("v4_ip_addr_cmc1", "v4IPAddrCmc1", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4000000000, 0, 255, r"""(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)""", [], []),
            "v4_ip_addr_cmc2": MoPropertyMeta("v4_ip_addr_cmc2", "v4IPAddrCmc2", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8000000000, 0, 255, r"""(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)""", [], []),
            "v6_ip_addr": MoPropertyMeta("v6_ip_addr", "v6IPAddr", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10000000000, 0, 255, r"""(https?://)?([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{0,4}|:[0-9A-Fa-f]{1,4})?|(:[0-9A-Fa-f]{1,4}){0,2})|(:[0-9A-Fa-f]{1,4}){0,3})|(:[0-9A-Fa-f]{1,4}){0,4})|:(:[0-9A-Fa-f]{1,4}){0,5})((:[0-9A-Fa-f]{1,4}){2}|:(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])(\.(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])){3})|(([0-9A-Fa-f]{1,4}:){1,6}|:):[0-9A-Fa-f]{0,4}|([0-9A-Fa-f]{1,4}:){7}:""", [], []),
            "v6_ip_addr_bmc1": MoPropertyMeta("v6_ip_addr_bmc1", "v6IPAddrBmc1", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x20000000000, 0, 255, r"""(https?://)?([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{0,4}|:[0-9A-Fa-f]{1,4})?|(:[0-9A-Fa-f]{1,4}){0,2})|(:[0-9A-Fa-f]{1,4}){0,3})|(:[0-9A-Fa-f]{1,4}){0,4})|:(:[0-9A-Fa-f]{1,4}){0,5})((:[0-9A-Fa-f]{1,4}){2}|:(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])(\.(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])){3})|(([0-9A-Fa-f]{1,4}:){1,6}|:):[0-9A-Fa-f]{0,4}|([0-9A-Fa-f]{1,4}:){7}:""", [], []),
            "v6_ip_addr_bmc2": MoPropertyMeta("v6_ip_addr_bmc2", "v6IPAddrBmc2", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x40000000000, 0, 255, r"""(https?://)?([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{0,4}|:[0-9A-Fa-f]{1,4})?|(:[0-9A-Fa-f]{1,4}){0,2})|(:[0-9A-Fa-f]{1,4}){0,3})|(:[0-9A-Fa-f]{1,4}){0,4})|:(:[0-9A-Fa-f]{1,4}){0,5})((:[0-9A-Fa-f]{1,4}){2}|:(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])(\.(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])){3})|(([0-9A-Fa-f]{1,4}:){1,6}|:):[0-9A-Fa-f]{0,4}|([0-9A-Fa-f]{1,4}:){7}:""", [], []),
            "v6_ip_addr_cmc1": MoPropertyMeta("v6_ip_addr_cmc1", "v6IPAddrCmc1", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x80000000000, 0, 255, r"""(https?://)?([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{0,4}|:[0-9A-Fa-f]{1,4})?|(:[0-9A-Fa-f]{1,4}){0,2})|(:[0-9A-Fa-f]{1,4}){0,3})|(:[0-9A-Fa-f]{1,4}){0,4})|:(:[0-9A-Fa-f]{1,4}){0,5})((:[0-9A-Fa-f]{1,4}){2}|:(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])(\.(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])){3})|(([0-9A-Fa-f]{1,4}:){1,6}|:):[0-9A-Fa-f]{0,4}|([0-9A-Fa-f]{1,4}:){7}:""", [], []),
            "v6_ip_addr_cmc2": MoPropertyMeta("v6_ip_addr_cmc2", "v6IPAddrCmc2", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x100000000000, 0, 255, r"""(https?://)?([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{0,4}|:[0-9A-Fa-f]{1,4})?|(:[0-9A-Fa-f]{1,4}){0,2})|(:[0-9A-Fa-f]{1,4}){0,3})|(:[0-9A-Fa-f]{1,4}){0,4})|:(:[0-9A-Fa-f]{1,4}){0,5})((:[0-9A-Fa-f]{1,4}){2}|:(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])(\.(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])){3})|(([0-9A-Fa-f]{1,4}:){1,6}|:):[0-9A-Fa-f]{0,4}|([0-9A-Fa-f]{1,4}:){7}:""", [], []),
            "v6_slaac_ip": MoPropertyMeta("v6_slaac_ip", "v6SlaacIp", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 255, r"""(https?://)?([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{0,4}|:[0-9A-Fa-f]{1,4})?|(:[0-9A-Fa-f]{1,4}){0,2})|(:[0-9A-Fa-f]{1,4}){0,3})|(:[0-9A-Fa-f]{1,4}){0,4})|:(:[0-9A-Fa-f]{1,4}){0,5})((:[0-9A-Fa-f]{1,4}){2}|:(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])(\.(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])){3})|(([0-9A-Fa-f]{1,4}:){1,6}|:):[0-9A-Fa-f]{0,4}|([0-9A-Fa-f]{1,4}:){7}:""", [], []),
            "v6link_local": MoPropertyMeta("v6link_local", "v6linkLocal", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, 0, 255, r"""(https?://)?([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{0,4}|:[0-9A-Fa-f]{1,4})?|(:[0-9A-Fa-f]{1,4}){0,2})|(:[0-9A-Fa-f]{1,4}){0,3})|(:[0-9A-Fa-f]{1,4}){0,4})|:(:[0-9A-Fa-f]{1,4}){0,5})((:[0-9A-Fa-f]{1,4}){2}|:(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])(\.(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])){3})|(([0-9A-Fa-f]{1,4}:){1,6}|:):[0-9A-Fa-f]{0,4}|([0-9A-Fa-f]{1,4}:){7}:""", [], []),
            "v_hostname": MoPropertyMeta("v_hostname", "vHostname", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x200000000000, 0, 63, r"""(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])""", [], []),
        },
    }

    # XML attribute name -> Pythonic attribute name, per platform; used by
    # the SDK when deserializing API responses into this object.
    prop_map = {
        "classic": {
            "adminDuplex": "admin_duplex",
            "adminNetSpeed": "admin_net_speed",
            "autoNeg": "auto_neg",
            "ddnsDomain": "ddns_domain",
            "ddnsEnable": "ddns_enable",
            "ddnsRefreshInterval": "ddns_refresh_interval",
            "dhcpEnable": "dhcp_enable",
            "dn": "dn",
            "dnsAlternate": "dns_alternate",
            "dnsPreferred": "dns_preferred",
            "dnsUsingDhcp": "dns_using_dhcp",
            "extGw": "ext_gw",
            "extIp": "ext_ip",
            "extMask": "ext_mask",
            "hostname": "hostname",
            "nicMode": "nic_mode",
            "nicRedundancy": "nic_redundancy",
            "portProfile": "port_profile",
            "rn": "rn",
            "status": "status",
            "v6dhcpEnable": "v6dhcp_enable",
            "v6dnsAlternate": "v6dns_alternate",
            "v6dnsPreferred": "v6dns_preferred",
            "v6dnsUsingDhcp": "v6dns_using_dhcp",
            "v6extEnabled": "v6ext_enabled",
            "v6extGw": "v6ext_gw",
            "v6extIp": "v6ext_ip",
            "v6prefix": "v6prefix",
            "vicSlot": "vic_slot",
            "vlanEnable": "vlan_enable",
            "vlanId": "vlan_id",
            "vlanPriority": "vlan_priority",
            "childAction": "child_action",
            "description": "description",
            "extEnabled": "ext_enabled",
            "id": "id",
            "ifType": "if_type",
            "mac": "mac",
            "operDuplex": "oper_duplex",
            "operNetSpeed": "oper_net_speed",
            "subject": "subject",
            "v6SlaacIp": "v6_slaac_ip",
            "v6linkLocal": "v6link_local",
        },
        "modular": {
            "adminDuplex": "admin_duplex",
            "adminNetSpeed": "admin_net_speed",
            "autoNeg": "auto_neg",
            "ddnsDomain": "ddns_domain",
            "ddnsEnable": "ddns_enable",
            "ddnsRefreshInterval": "ddns_refresh_interval",
            "dhcpEnable": "dhcp_enable",
            "dn": "dn",
            "dnsAlternate": "dns_alternate",
            "dnsPreferred": "dns_preferred",
            "dnsUsingDhcp": "dns_using_dhcp",
            "extGw": "ext_gw",
            "extIp": "ext_ip",
            "extMask": "ext_mask",
            "hostname": "hostname",
            "nicMode": "nic_mode",
            "nicRedundancy": "nic_redundancy",
            "portProfile": "port_profile",
            "rn": "rn",
            "status": "status",
            "v6dhcpEnable": "v6dhcp_enable",
            "v6dnsAlternate": "v6dns_alternate",
            "v6dnsPreferred": "v6dns_preferred",
            "v6dnsUsingDhcp": "v6dns_using_dhcp",
            "v6extEnabled": "v6ext_enabled",
            "v6extGw": "v6ext_gw",
            "v6extIp": "v6ext_ip",
            "v6prefix": "v6prefix",
            "vicSlot": "vic_slot",
            "vlanEnable": "vlan_enable",
            "vlanId": "vlan_id",
            "vlanPriority": "vlan_priority",
            "childAction": "child_action",
            "description": "description",
            "extEnabled": "ext_enabled",
            "id": "id",
            "ifType": "if_type",
            "mac": "mac",
            "operDuplex": "oper_duplex",
            "operNetSpeed": "oper_net_speed",
            "singleIPEnable": "single_ip_enable",
            "startingPort": "starting_port",
            "subject": "subject",
            "v4IPAddr": "v4_ip_addr",
            "v4IPAddrBmc1": "v4_ip_addr_bmc1",
            "v4IPAddrBmc2": "v4_ip_addr_bmc2",
            "v4IPAddrCmc1": "v4_ip_addr_cmc1",
            "v4IPAddrCmc2": "v4_ip_addr_cmc2",
            "v6IPAddr": "v6_ip_addr",
            "v6IPAddrBmc1": "v6_ip_addr_bmc1",
            "v6IPAddrBmc2": "v6_ip_addr_bmc2",
            "v6IPAddrCmc1": "v6_ip_addr_cmc1",
            "v6IPAddrCmc2": "v6_ip_addr_cmc2",
            "v6SlaacIp": "v6_slaac_ip",
            "v6linkLocal": "v6link_local",
            "vHostname": "v_hostname",
        },
    }

    def __init__(self, parent_mo_or_dn, **kwargs):
        """Initialize a MgmtIf instance.

        :param parent_mo_or_dn: parent ManagedObject instance or its DN
            string (forwarded to ``ManagedObject.__init__``).
        :param kwargs: initial property values, keyed by the Pythonic
            names declared in ``prop_meta``.
        """
        # Dirty-mask bits are set as writable properties are assigned;
        # cleared here before any assignment happens.
        self._dirty_mask = 0
        # All declared properties default to None; the union of the
        # classic and modular property sets is initialized so either
        # platform's attributes exist on every instance.
        self.admin_duplex = None
        self.admin_net_speed = None
        self.auto_neg = None
        self.ddns_domain = None
        self.ddns_enable = None
        self.ddns_refresh_interval = None
        self.dhcp_enable = None
        self.dns_alternate = None
        self.dns_preferred = None
        self.dns_using_dhcp = None
        self.ext_gw = None
        self.ext_ip = None
        self.ext_mask = None
        self.hostname = None
        self.nic_mode = None
        self.nic_redundancy = None
        self.port_profile = None
        self.status = None
        self.v6dhcp_enable = None
        self.v6dns_alternate = None
        self.v6dns_preferred = None
        self.v6dns_using_dhcp = None
        self.v6ext_enabled = None
        self.v6ext_gw = None
        self.v6ext_ip = None
        self.v6prefix = None
        self.vic_slot = None
        self.vlan_enable = None
        self.vlan_id = None
        self.vlan_priority = None
        self.child_action = None
        self.description = None
        self.ext_enabled = None
        self.id = None
        self.if_type = None
        self.mac = None
        self.oper_duplex = None
        self.oper_net_speed = None
        self.subject = None
        self.v6_slaac_ip = None
        self.v6link_local = None
        # Modular-only properties.
        self.single_ip_enable = None
        self.starting_port = None
        self.v4_ip_addr = None
        self.v4_ip_addr_bmc1 = None
        self.v4_ip_addr_bmc2 = None
        self.v4_ip_addr_cmc1 = None
        self.v4_ip_addr_cmc2 = None
        self.v6_ip_addr = None
        self.v6_ip_addr_bmc1 = None
        self.v6_ip_addr_bmc2 = None
        self.v6_ip_addr_cmc1 = None
        self.v6_ip_addr_cmc2 = None
        self.v_hostname = None

        # Base-class init applies kwargs and registers the object.
        ManagedObject.__init__(self, "MgmtIf", parent_mo_or_dn, **kwargs)
| 106.078947
| 605
| 0.566884
| 5,785
| 36,279
| 3.441314
| 0.04771
| 0.02823
| 0.064045
| 0.076854
| 0.842074
| 0.818666
| 0.742063
| 0.73257
| 0.725136
| 0.661543
| 0
| 0.131532
| 0.141845
| 36,279
| 341
| 606
| 106.390029
| 0.507918
| 0.002536
| 0
| 0.28125
| 0
| 0.115625
| 0.465404
| 0.278231
| 0
| 0
| 0.017084
| 0
| 0
| 1
| 0.003125
| false
| 0
| 0.009375
| 0
| 0.165625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
43625bd5c36db6d7ec1d03f23246f1ee9f5c071b
| 3,640
|
py
|
Python
|
TEST/code.tst.py
|
ihgazni2/rblk
|
653d0687c9a4a768e8f327d065e60c7c5d9ddfba
|
[
"MIT"
] | null | null | null |
TEST/code.tst.py
|
ihgazni2/rblk
|
653d0687c9a4a768e8f327d065e60c7c5d9ddfba
|
[
"MIT"
] | null | null | null |
TEST/code.tst.py
|
ihgazni2/rblk
|
653d0687c9a4a768e8f327d065e60c7c5d9ddfba
|
[
"MIT"
] | null | null | null |
# Demo / smoke-test script for rblk.txt.Parser: each section below builds a
# Parser over a bracketed sample string and exercises one public method.
# NOTE(review): depends on the project-local packages `xdict` and `rblk`.
#parser
from xdict.jprint import pobj,pdir
from rblk.txt import Parser
# sample text with nested blocks delimited by the four tag pairs below
txt = '''SSS{ABC"UVW(axb)"MMM[aa]vv(bb(c()c))}<rr<ss>tt>UUU'''
tag_pairs = '""{}()[]<>'
p = Parser(txt,tag_pairs)
pdir(p,range=[-10,-1])
# inspect parser attributes (expression statements; useful in a REPL)
p.txt
p.breadths
p.depth
#text_mat
from rblk.txt import Parser
txt = '''SSS{ABC"UVW(axb)"MMM[aa]vv(bb(c()c))}<rr<ss>tt>UUU'''
tag_pairs = '""{}()[]<>'
p = Parser(txt,tag_pairs)
# dump the text matrix row by row
dummy = [print(each) for each in p.text_mat]
#srch4txt()
from rblk.txt import Parser
txt = '''SSS{ABC"UVW(axb)"MMM[aa]vv(bb(c()c))}<rr<ss>tt>UUU'''
tag_pairs = '""{}()[]<>'
p = Parser(txt,tag_pairs)
dummy = [print(each) for each in p.text_mat]
p.srch4txt()
#lvsrch4txt(lv)
from rblk.txt import Parser
txt = '''SSS{ABC"UVW(axb)"MMM[aa]vv(bb(c()c))}<rr<ss>tt>UUU'''
tag_pairs = '""{}()[]<>'
p = Parser(txt,tag_pairs)
dummy = [print(each) for each in p.text_mat]
p.lvsrch4txt(2)
#lvsrch4txt_from(from_lv)
from rblk.txt import Parser
txt = '''SSS{ABC"UVW(axb)"MMM[aa]vv(bb(c()c))}<rr<ss>tt>UUU'''
tag_pairs = '""{}()[]<>'
p = Parser(txt,tag_pairs)
dummy = [print(each) for each in p.text_mat]
p.lvsrch4txt_from(3)
#lvsrch4txt_to(to_lv)
from rblk.txt import Parser
txt = '''SSS{ABC"UVW(axb)"MMM[aa]vv(bb(c()c))}<rr<ss>tt>UUU'''
tag_pairs = '""{}()[]<>'
p = Parser(txt,tag_pairs)
dummy = [print(each) for each in p.text_mat]
p.lvsrch4txt_to(3)
#lvsrch4txt_fromto(from_lv,to_lv)
from rblk.txt import Parser
txt = '''SSS{ABC"UVW(axb)"MMM[aa]vv(bb(c()c))}<rr<ss>tt>UUU'''
tag_pairs = '""{}()[]<>'
p = Parser(txt,tag_pairs)
dummy = [print(each) for each in p.text_mat]
p.lvsrch4txt_fromto(2,3)
#srch4blk(tag)
print("<two reserved tags null and root> null:txt-without-block root:original-txt")
from rblk.txt import Parser
txt = '''SSS{ABC"UVW(axb)"MMM[aa]vv(bb(c()c))}<rr<ss>tt>UUU'''
tag_pairs = '""{}()[]<>'
p = Parser(txt,tag_pairs)
# search blocks by each opening tag, then by the two reserved tags
p.srch4blk('<')
p.srch4blk('(')
p.srch4blk('[')
p.srch4blk('{')
p.srch4blk('"')
p.srch4blk("null")
p.srch4blk("root")
#lvsrch4blk(tag,lv)
from rblk.txt import Parser
txt = '''SSS{ABC"UVW(axb)"MMM[aa]vv(bb(c()c))}<rr<ss>tt>UUU'''
tag_pairs = '""{}()[]<>'
p = Parser(txt,tag_pairs)
p.srch4blk('(')
dummy = [print(each) for each in p.text_mat]
p.lvsrch4blk('(',3)
#lvsrch4blk_from(tag,from_lv)
from rblk.txt import Parser
txt = '''SSS{ABC"UVW(axb)"MMM[aa]vv(bb(c()c))}<rr<ss>tt>UUU'''
tag_pairs = '""{}()[]<>'
p = Parser(txt,tag_pairs)
p.srch4blk('(')
dummy = [print(each) for each in p.text_mat]
p.lvsrch4blk_from('(',3)
p.lvsrch4blk_from('(',4)
#lvsrch4blk_to(tag,to_lv)
from rblk.txt import Parser
txt = '''SSS{ABC"UVW(axb)"MMM[aa]vv(bb(c()c))}<rr<ss>tt>UUU'''
tag_pairs = '""{}()[]<>'
p = Parser(txt,tag_pairs)
p.srch4blk('(')
dummy = [print(each) for each in p.text_mat]
p.lvsrch4blk_to('(',3)
p.lvsrch4blk_to('(',4)
#lvsrch4blk_fromto(tag,from_lv,to_lv)
from rblk.txt import Parser
txt = '''SSS{ABC"UVW(axb)"MMM[aa]vv(bb(c()c))}<rr<ss>tt>UUU'''
tag_pairs = '""{}()[]<>'
p = Parser(txt,tag_pairs)
p.srch4blk('(')
dummy = [print(each) for each in p.text_mat]
p.lvsrch4blk_fromto('(',3,4)
#srch4loc(tag)
from rblk.txt import Parser
txt = '''SSS{ABC"UVW(axb)"MMM[aa]vv(bb(c()c))}<rr<ss>tt>UUU'''
tag_pairs = '""{}()[]<>'
p = Parser(txt,tag_pairs)
p.srch4blk('(')
dummy = [print(each) for each in p.text_mat]
p.srch4loc('(')
#non-symmetric-tag_pairs
# open/close tags may differ: here '{' closes with '@' and '#' closes with '>'
from rblk.txt import Parser
txt = '''{ddd@#ddd>'''
tag_pairs = '{@#>'
p = Parser(txt,tag_pairs)
dummy = [print(each) for each in p.text_mat]
#chinese-tag_pairs
# tags are not restricted to ASCII; here a CJK char pairs with itself
from rblk.txt import Parser
txt = '''的dd的【人【uu】人】'''
tag_pairs = '的的【】'
p = Parser(txt,tag_pairs)
dummy = [print(each) for each in p.text_mat]
p.srch4txt()
| 20.449438
| 84
| 0.643407
| 655
| 3,640
| 3.471756
| 0.099237
| 0.112577
| 0.079156
| 0.112137
| 0.816623
| 0.816623
| 0.816623
| 0.816623
| 0.786719
| 0.786719
| 0
| 0.01578
| 0.112088
| 3,640
| 177
| 85
| 20.564972
| 0.687809
| 0.080495
| 0
| 0.740385
| 0
| 0.125
| 0.27352
| 0.201984
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.153846
| 0
| 0.153846
| 0.144231
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
438b37664b87105c6b593693a69cd226685fd87f
| 35,754
|
py
|
Python
|
src/figPlots.py
|
ivezic/MultifitTests
|
9d22d9f5c40d21ec693c0a7d54e2c6f6dda3f36e
|
[
"MIT"
] | null | null | null |
src/figPlots.py
|
ivezic/MultifitTests
|
9d22d9f5c40d21ec693c0a7d54e2c6f6dda3f36e
|
[
"MIT"
] | null | null | null |
src/figPlots.py
|
ivezic/MultifitTests
|
9d22d9f5c40d21ec693c0a7d54e2c6f6dda3f36e
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
from astroML.stats import binned_statistic_2d
from astroML.plotting.mcmc import convert_to_stdev
from tools2Dgauss import *
def FourPanelPlot(oneDpixels, nonoise, psf, image, diffimage):
    """Show a 2x2 panel figure: noiseless image, psf, noisy image, and residual.

    oneDpixels -- 1D pixel coordinate array; only its first/last entries are
                  used to set the (square) image extent
    nonoise    -- noiseless model image (top left)
    psf        -- psf image (bottom left)
    image      -- noisy data image (top right)
    diffimage  -- image - psf residual (bottom right)

    Displays the figure interactively; returns None.
    """
    fig = plt.figure(figsize=(8, 8))
    fig.subplots_adjust(left=0.13, bottom=0.15, right=0.95, top=0.90, wspace=0.18, hspace=0.46)
    # all panels share the same extent, taken from the pixel grid edges
    extent = (oneDpixels[0], oneDpixels[-1], oneDpixels[0], oneDpixels[-1])
    # (subplot code, title, data, color limits, colorbar label or None)
    # order matches the original: 221, 223, 222, 224
    panels = [
        (221, 'noiseless image', nonoise, (-20, 100), None),
        (223, 'psf image', psf, (-20, 100), None),
        (222, 'image with noise', image, (-20, 100), r'$counts$'),
        (224, 'image - psf', diffimage, (-50, 50), r'$counts$'),
    ]
    for subplot, panelTitle, data, clim, cbarLabel in panels:
        ax = fig.add_subplot(subplot)
        ax.set_title(panelTitle, fontsize=14)
        plt.imshow(data, origin='lower', interpolation='nearest',
                   extent=extent, cmap=plt.cm.binary, aspect='auto')
        plt.clim(*clim)
        if cbarLabel is None:
            plt.colorbar().set_label('')
        else:
            plt.colorbar().set_label(cbarLabel, fontsize=14)
        ax.set_xlabel(r'x (pixels)', fontsize=12)
        ax.set_ylabel(r'y (pixels)', fontsize=12)
    plt.show()
def dmagPlot(SNRmod1, dmag1, dmagStd1, SNRmod2, dmag2, dmagStd2):
    """Plot (m_psf - m_mod) +/- sigma against 1/SNR for two model widths.

    The first triple (SNRmod1, dmag1, dmagStd1) is drawn in red, the second
    triple in blue; the mean curve is dashed, the +/- sigma envelope solid.
    Displays the figure interactively; returns None.
    """
    fig = plt.figure(figsize=(8, 8))
    fig.subplots_adjust(left=0.13, bottom=0.15, right=0.95, top=0.90, wspace=0.18, hspace=0.46)
    ax = fig.add_subplot(111)
    ax.set_title('$(m_{psf}-m_{mod}) \pm \sigma$', fontsize=14)
    # (SNR, dmag, scatter, color, label) per input model width
    curves = [
        (SNRmod1, dmag1, dmagStd1, 'r', r'$\theta_{in}=\theta_{psf}/4$'),
        (SNRmod2, dmag2, dmagStd2, 'b', r'$\theta_{in}=\theta_{psf}/2$'),
    ]
    for snr, dmag, dmagStd, color, label in curves:
        invSNR = 1/snr
        # envelope (solid) then mean (dashed), as in the original draw order
        ax.plot(invSNR, dmag+dmagStd, '-' + color, label=label)
        ax.plot(invSNR, dmag-dmagStd, '-' + color, label=label)
        ax.plot(invSNR, dmag, '--' + color, label=label)
    ax.set_xlabel(r'1/SNR', fontsize=12)
    ax.set_ylabel(r'$(m_{psf}-m_{mod}) \pm \sigma$', fontsize=12)
    plt.show()
# e.g.
# figCcomparison('SGsimpleMCMC_2params_m15sigt1_3000.dat')
def figCcomparison(datafile, name = None, title=''):
#### read data VOLATILE: assumes SGsimple*dat files with the vectors below
v = np.loadtxt(datafile)
vNames = ['sC1', 'sC2', 'sC3', 'SneffPSF', 'SneffModel', 'Schi2PSF', 'Schi2Model', 'sCmod', 'SbestA', \
'SbestARMS', 'SbestSig', 'SbestSigErr']
vNames = vNames + ['C1', 'C2', 'C3', 'neffPSF', 'neffModel', 'chi2PSF', 'chi2Model', 'Cmod', 'bestA', \
'bestARMS', 'bestSig', 'bestSigErr']
def plotPanel(sX, sY, gX, gY, xMin, xMax, yMin, yMax, xLabel, yLabel, title=''):
## bin (astroML code)
# axes limits
range = np.zeros((2,2))
range[0,0]=xMin
range[0,1]=xMax
range[1,0]=yMin
range[1,1]=yMax
NS, xedgesS, yedgesS = binned_statistic_2d(sX, sY, sY,'count', bins=20, range=range)
NG, xedgesG, yedgesG = binned_statistic_2d(gX, gY, gY,'count', bins=20, range=range)
## plot
## galaxies are blue contours
levels = np.linspace(0, np.log10(NG.max()), 7)[2:]
# plt.contour(np.log10(NG.T), levels, colors='b', linewidths=2, extent=[xedgesG[0], xedgesG[-1], yedgesG[0], yedgesG[-1]])
plt.scatter(gX, gY, color='blue', s=5, linewidths=1, alpha=0.2)
# stars are copper continuous map
cmap = plt.cm.copper
cmap.set_bad('w', 0.0)
# plt.imshow(np.log10(NS.T), origin='lower', extent=[xedgesS[0], xedgesS[-1], yedgesS[0], yedgesS[-1]], aspect='auto', interpolation='nearest', cmap=cmap)
plt.scatter(sX, sY, color='red', s=10, linewidths=1, alpha=0.25)
plt.xlim(xMin, xMax)
plt.ylim(yMin, yMax)
plt.xlabel(xLabel, fontsize=16)
plt.ylabel(yLabel, fontsize=16)
xTitle = xMin + 0.05*(xMax-xMin)
yTitle = yMax + 0.05*(yMax-yMin)
ax.text(xTitle, yTitle, title)
#--------------------
# names for classification quantities
C1name = '$C_{Sebok}$'
C2name = '$m_{psf}-m_{mod}$'
C3name = '$\chi^2_{psf}-\chi^2_{mod}$'
C4name = '$\sigma \,\, (pixel)$'
C1 = v[vNames.index('C1')]
C2 = 2.5*np.log10(v[vNames.index('C2')])
C3 = v[vNames.index('C3')]
C4 = v[vNames.index('bestSig')]
sC1 = v[vNames.index('sC1')]
sC2 = 2.5*np.log10(v[vNames.index('sC2')])
sC3 = v[vNames.index('sC3')]
sC4 = v[vNames.index('SbestSig')]
## Create figure and subplots
fig = plt.figure(figsize=(8, 8))
fig.subplots_adjust(wspace=0.25, hspace=0.25, left=0.1, right=0.95, bottom=0.12, top=0.95)
C2min = -0.05
C2max = 0.7
#
ax = plt.subplot(321)
xLabel = C2name
yLabel = C1name
xMin = C2min
xMax= C2max
yMin=0.99
yMax=1.12
yMin = np.min(sC1)
yMax = np.max(C1)
yMax = 20.0
yMin = -5.0
plotPanel(sC2, sC1, C2, C1, xMin, xMax, yMin, yMax, xLabel, yLabel, title=title)
#
ax = plt.subplot(322)
xLabel = C2name
yLabel = C3name
xMin = C2min
xMax= C2max
yMin=-2
yMax=22
plotPanel(sC2, sC3, C2, C3, xMin, xMax, yMin, yMax, xLabel, yLabel)
#
ax = plt.subplot(323)
xLabel = C2name
yLabel = C4name
xMin = C2min
xMax= C2max
yMin=-0.05
yMax=1.95
plotPanel(sC2, sC4, C2, C4, xMin, xMax, yMin, yMax, xLabel, yLabel)
#
ax = plt.subplot(324)
xLabel = C1name
yLabel = C3name
xMin = 0.99
xMax= 1.12
xMin = np.min(sC1)
xMax = np.max(C1)
xMin = -5.0
xMax = 20.0
yMin=-2
yMax=22
plotPanel(sC1, sC3, C1, C3, xMin, xMax, yMin, yMax, xLabel, yLabel)
if (name is None):
plt.show()
else:
print 'saving plot to:', name
plt.savefig(name, bbox_inches='tight')
def figClassification(datafile, name = None, title=''):
    """Histogram and ROC (completeness vs. purity) panels for four
    star/galaxy classifiers read from an SGsimple*dat file.

    datafile -- whitespace-separated file readable by np.loadtxt
    name     -- output file name; the figure is shown interactively when None
    title    -- title placed on the first histogram panel
    """
    #### read data VOLATILE: assumes SGsimple*dat files with the vectors below
    v = np.loadtxt(datafile)
    # star ('s'/'S' prefixed) vectors followed by galaxy vectors, in file order
    vNames = ['sC1', 'sC2', 'sC3', 'SneffPSF', 'SneffModel', 'Schi2PSF', 'Schi2Model', 'sCmod', 'SbestA', \
              'SbestARMS', 'SbestSig', 'SbestSigErr']
    vNames = vNames + ['C1', 'C2', 'C3', 'neffPSF', 'neffModel', 'chi2PSF', 'chi2Model', 'Cmod', 'bestA', \
              'bestARMS', 'bestSig', 'bestSigErr']
    def plot2Chistograms(Cs, Cg, Xmin, Xmax, Ymin, Ymax, Xlabel, Ylabel, bins=20, title=''):
        # Overlaid normalized histograms: stars (Cs) red, galaxies (Cg) blue.
        # NOTE(review): draws on the *enclosing* `ax`, which is re-bound via
        # plt.subplot(...) before every call.
        limits = [(Xmin, Xmax, Ymin, Ymax)]
        labels = [Xlabel, Ylabel]
        ax.set_xlim(Xmin, Xmax)
        ax.set_ylim(Ymin, Ymax)
        ax.set_xlabel(Xlabel, fontsize=12)
        ax.set_ylabel(Ylabel, fontsize=12)
        plt.tick_params(axis='both', which='major', labelsize=15)
        # panel title just above the upper-left corner
        xTitle = Xmin + 0.05*(Xmax-Xmin)
        yTitle = Ymax + 0.05*(Ymax-Ymin)
        ax.text(xTitle, yTitle, title)
        # plot a histogram
        ax.hist(Cs, bins=bins, normed=True, facecolor='red', histtype='stepfilled', alpha=0.4)
        ax.hist(Cg, bins=bins, normed=True, facecolor='blue', histtype='stepfilled', alpha=0.4)
    def plotROC(Cs, Cg, Climit=50, title=''):
        # Completeness vs. purity curves: stars (threshold keeps C < cut) red,
        # galaxies (C > cut) blue.
        # NOTE(review): Xmin/Xmax are read from the *enclosing* scope, i.e. the
        # limits set for the most recent histogram panel — confirm intended.
        ax.set_xlim(Climit, 100)
        ax.set_ylim(Climit, 100)
        ax.set_xlabel('completeness (\%)', fontsize=12)
        ax.set_ylabel('purity (\%)', fontsize=12)
        plt.tick_params(axis='both', which='major', labelsize=15)
        # sweep the classification threshold over [Xmin, Xmax]
        Ngrid = 1000
        Cgrid = np.linspace(Xmin, Xmax, Ngrid)
        ComplS = 0*Cgrid
        PurityS = 0*Cgrid + 100.0
        ComplG = 0*Cgrid
        PurityG = 0*Cgrid + 100.0
        for i in range(0,Ngrid):
            C = Cgrid[i]
            CsOK = Cs[Cs < C]
            CgOK = Cg[Cg < C]
            if (CsOK.size>0):
                ComplS[i] = 100.0*CsOK.size / Cs.size
                PurityS[i] = 100.0*CsOK.size / (CsOK.size + CgOK.size)
            CsOK2 = Cs[Cs > C]
            CgOK2 = Cg[Cg > C]
            if (CgOK2.size>0):
                ComplG[i] = 100.0*CgOK2.size / Cg.size
                PurityG[i] = 100.0*CgOK2.size / (CsOK2.size + CgOK2.size)
        ax.plot(ComplS, PurityS, '-r', lw=2)
        ax.plot(ComplG, PurityG, '-b', lw=2)
        # reference grid every 10% (solid) and offset by 5% (dashed)
        for CL in [50, 60, 70, 80, 90]:
            ax.plot([CL, CL], [0.0, 100], '-k', lw=1)
            ax.plot([0, 100], [CL, CL], '-k', lw=1)
            ax.plot([CL+5, CL+5], [0.0, 100], '--k', lw=1)
            ax.plot([0, 100], [CL+5, CL+5], '--k', lw=1)
    #--------------------
    # names for classification quantities
    C1name = '$C_{Sebok}$'
    C2name = '$m_{psf}-m_{mod}$'
    C3name = '$\chi^2_{psf}-\chi^2_{mod}$'
    C4name = '$\sigma \,\, (pixel)$'
    # galaxy (model) quantities
    C1 = v[vNames.index('C1')]
    C2 = 2.5*np.log10(v[vNames.index('C2')])
    C3 = v[vNames.index('C3')]
    C4 = v[vNames.index('bestSig')]
    # star (psf) quantities
    sC1 = v[vNames.index('sC1')]
    sC2 = 2.5*np.log10(v[vNames.index('sC2')])
    sC3 = v[vNames.index('sC3')]
    sC4 = v[vNames.index('SbestSig')]
    ## Create figure and subplots
    fig = plt.figure(figsize=(8, 9))
    fig.subplots_adjust(wspace=0.27, hspace=0.35, left=0.12, right=0.94, bottom=0.05, top=0.95)
    Climit = 66
    # overridden: the effective lower plot limit is 46
    Climit = 46
    # Climit = 0
    bins = 40
    ## chi2 classification
    # plot histograms
    ax = plt.subplot(421)
    Xlabel = C3name
    Ylabel = '$n / (N\Delta_{bin})$'
    Xmin = np.min(sC3) - 1
    Xmax = np.max(C3)
    # overridden: capped at 25.0
    Xmax = 25.0
    plot2Chistograms(sC3, C3, Xmin=Xmin, Xmax=Xmax, Ymin=0.0, Ymax=0.5, Xlabel=Xlabel, Ylabel=Ylabel, bins=bins, title=title)
    # plot ROC curves
    ax = plt.subplot(422)
    plotROC(sC3, C3, Climit=Climit, title='')
    ## Csebok classification
    # plot histograms
    ax = plt.subplot(423)
    Xlabel = C1name
    Ylabel = '$n / (N\Delta_{bin})$'
    Xmin = np.min(sC1) - 0.01
    Xmax = np.max(C1)
    # Xmax = 1.17
    plot2Chistograms(sC1, C1, Xmin=Xmin, Xmax=Xmax, Ymin=0.0, Ymax=0.5, Xlabel=Xlabel, Ylabel=Ylabel, bins=bins, title='')
    # plot2Chistograms(sC1, C1, Xmin=Xmin, Xmax=Xmax, Ymin=0.0, Ymax=45.0, Xlabel=Xlabel, Ylabel=Ylabel, bins=bins, title='')
    # plot ROC curves
    ax = plt.subplot(424)
    plotROC(sC1, C1, Climit=Climit, title='')
    ## Cmod/Cpsf classification
    # plot histograms
    ax = plt.subplot(425)
    Xlabel = C2name
    Ylabel = '$n / (N\Delta_{bin})$'
    Xmin = np.min(sC2) - 0.02
    Xmax = np.max(C2)
    # overridden: capped at 0.7
    Xmax = 0.7
    plot2Chistograms(sC2, C2, Xmin=Xmin, Xmax=Xmax, Ymin=0.0, Ymax=15.0, Xlabel=Xlabel, Ylabel=Ylabel, bins=bins, title='')
    # plot ROC curves
    ax = plt.subplot(426)
    plotROC(sC2, C2, Climit=Climit, title='')
    ## best sigma classification
    # plot histograms
    ax = plt.subplot(427)
    Xlabel = C4name
    Ylabel = '$n / (N\Delta_{bin})$'
    Xmin = np.min(sC4) - 0.2
    Xmax = np.max(C4)
    # overridden: capped at 2.2
    Xmax = 2.2
    plot2Chistograms(sC4, C4, Xmin=Xmin, Xmax=Xmax, Ymin=0.0, Ymax=5.0, Xlabel=Xlabel, Ylabel=Ylabel, bins=bins, title='')
    # plot ROC curves
    ax = plt.subplot(428)
    plotROC(sC4, C4, Climit=Climit, title='')
    if (name is None):
        plt.show()
    else:
        print 'saving plot to:', name
        plt.savefig(name, bbox_inches='tight')
def chi2plot(oneDpixels, image, bestModel, chiPixSig, chiPixCmod, chi2image, sigtrue, sigmaML, CmodML):
    """2x2 panel figure: data image, ln(chi2_dof) surface over the
    (sigma, Cmod) grid, best-fit model, and data - model residuals.

    oneDpixels -- 1D pixel coordinates; end values set the image extent
    chiPixSig, chiPixCmod -- grid coordinates of the chi2 surface
    sigtrue, sigmaML, CmodML -- true sigma and ML solution to mark on panel 2
    Displays the figure interactively; returns None.
    """
    fig = plt.figure(figsize=(8, 8))
    fig.subplots_adjust(left=0.08, bottom=0.15, right=0.95, top=0.90, wspace=0.29, hspace=0.46)
    ## image with noise
    ax = fig.add_subplot(221)
    ax.set_title('data image', fontsize=14)
    plt.imshow(image, origin='lower', interpolation='nearest',
               extent=(oneDpixels[0], oneDpixels[-1], oneDpixels[0], oneDpixels[-1]),
               cmap=plt.cm.binary, aspect='auto')
    plt.clim(-20, 100)
    plt.colorbar().set_label(r'$counts$', fontsize=14)
    ax.set_xlabel(r'x (pixels)', fontsize=12)
    ax.set_ylabel(r'y (pixels)', fontsize=12)
    ## chi2 image
    ax = fig.add_subplot(222)
    ax.set_title('ln($\chi^2_{dof}$) image', fontsize=14)
    # per-degree-of-freedom chi2, log scale
    Lchi2image = np.log(chi2image/image.size)
    # pretty color map
    plt.imshow(Lchi2image, origin='lower',
               extent=(chiPixSig[0], chiPixSig[-1], chiPixCmod[0], chiPixCmod[-1]),
               cmap=plt.cm.RdYlGn, aspect='auto')
    # mark true values
    # NOTE(review): y is hard-coded to 1000.0 — presumably the true Cmod;
    # confirm against the caller.
    ax.plot(sigtrue, 1000.0, 'o', color='blue', alpha=0.75)
    # mark ML solution: (sigmaML, CmodML)
    ax.plot(sigmaML, CmodML, '+', color='blue', alpha=0.99)
    print 'chi2plot: sigtrue, sigmaML, CmodML=', sigtrue, sigmaML, CmodML
    # legend
    plt.clim(np.min(Lchi2image), np.max(Lchi2image))
    plt.colorbar().set_label(r'ln($\chi^2_{dof}$)', fontsize=14)
    # contours at the 1/2/3-sigma confidence levels
    plt.contour(chiPixSig, chiPixCmod, convert_to_stdev(Lchi2image),
                levels=(0.683, 0.955, 0.997),
                colors='k')
    ax.set_xlabel(r'$\sigma$ (pixel)', fontsize=12)
    ax.set_ylabel(r'$C_{mod}$ (counts)', fontsize=12)
    ## best-fit model image
    ax = fig.add_subplot(223)
    ax.set_title('best-fit model', fontsize=14)
    plt.imshow(bestModel, origin='lower', interpolation='nearest',
               extent=(oneDpixels[0], oneDpixels[-1], oneDpixels[0], oneDpixels[-1]),
               cmap=plt.cm.binary, aspect='auto')
    plt.clim(-20, 100)
    plt.colorbar().set_label(r'$counts$', fontsize=14)
    ax.set_xlabel(r'x (pixels)', fontsize=12)
    ax.set_ylabel(r'y (pixels)', fontsize=12)
    ## residual difference image - best-fit model
    ax = fig.add_subplot(224)
    ax.set_title('data - model residuals', fontsize=14)
    diffimage = image - bestModel
    plt.imshow(diffimage, origin='lower', interpolation='nearest',
               extent=(oneDpixels[0], oneDpixels[-1], oneDpixels[0], oneDpixels[-1]),
               cmap=plt.cm.binary, aspect='auto')
    plt.clim(-60, 60)
    plt.colorbar().set_label(r'$counts$', fontsize=14)
    ax.set_xlabel(r'x (pixels)', fontsize=12)
    ax.set_ylabel(r'y (pixels)', fontsize=12)
    # NOTE(review): name is always None here, so the else branch below is dead
    # code (the figure is always shown, never saved)
    name = None
    if (name is None):
        plt.show()
    else:
        print 'saving plot to:', name
        plt.savefig(name, bbox_inches='tight')
def chi2plotMarginal(chiPixSig, chiPixCmod, chi2image, sigtrue, sigmaML, CmodML):
    """Plot the ln(L) surface over (sigma, Cmod) with its two marginal
    projections p(sigma) and p(Cmod), plus the conditional p(Cmod|sigma=0).

    chiPixSig, chiPixCmod -- grid coordinates of the chi2 surface
    chi2image -- chi2 values on that grid
    sigtrue, sigmaML, CmodML -- true sigma and ML solution to mark
    Displays the figure interactively; returns None.
    """
    fig = plt.figure(figsize=(8, 8))
    fig.subplots_adjust(left=0.08, bottom=0.15, right=0.95, top=0.90, wspace=0.29, hspace=0.46)
    ## go from chi2 to ln(L):
    # since L = exp(-chi2/2)
    # lnL = -1/2 * chi2
    lnL = -0.5*chi2image
    # normalize so the peak sits at lnL = 0
    lnL -= lnL.max()
    lnL[lnL < -10] = -10 # truncate for clean plotting
    ## lnL image
    ax = fig.add_axes([0.35, 0.35, 0.45, 0.6], xticks=[], yticks=[])
    ax.set_title('ln(L) image', fontsize=14)
    # pretty color map
    plt.imshow(lnL, origin='lower',
               extent=(chiPixSig[0], chiPixSig[-1], chiPixCmod[0], chiPixCmod[-1]),
               cmap=plt.cm.RdYlGn, aspect='auto')
    # colorbar
    cax = plt.axes([0.82, 0.35, 0.02, 0.6])
    cb = plt.colorbar(cax=cax)
    cb.set_label(r'$lnL(\sigma, C_{mod})$', fontsize=14)
    plt.clim(np.min(lnL), np.max(lnL))
    # contours WHY IS THIS NOT WORKING??
    plt.contour(chiPixSig, chiPixCmod, convert_to_stdev(lnL),
                levels=(0.683, 0.955, 0.997),
                colors='k')
    # mark true values
    # NOTE(review): y is hard-coded to 1000.0 — presumably the true Cmod;
    # confirm against the caller.
    ax.plot(sigtrue, 1000.0, 'o', color='red', alpha=0.75)
    # mark ML solution: (sigmaML, CmodML)
    ax.plot(sigmaML, CmodML, 'x', color='white', alpha=0.99, lw=35)
    # compute marginal projections (sum over rows / columns of exp(lnL))
    p_sigma = np.exp(lnL).sum(0)
    p_Cmod = np.exp(lnL).sum(1)
    # and p(C|sigma=0)
    L = np.exp(lnL)
    # first column, i.e. the slice at the smallest sigma value of the grid
    L0 = L[:,0]
    pCmod0 = L0 / np.max(L0) * np.max(p_Cmod)
    # p(sigma) panel below the image
    ax1 = fig.add_axes([0.35, 0.1, 0.45, 0.23], yticks=[])
    ax1.plot(chiPixSig, p_sigma, '-k')
    ax1.set_xlabel(r'$\sigma$ (pixel)', fontsize=12)
    ax1.set_ylabel(r'$p(\sigma)$', fontsize=12)
    ax1.set_xlim(np.min(chiPixSig), np.max(chiPixSig))
    # p(Cmod) panel left of the image
    ax2 = fig.add_axes([0.15, 0.35, 0.18, 0.6], xticks=[])
    ax2.plot(p_Cmod, chiPixCmod, '-k')
    ax2.plot(pCmod0, chiPixCmod, '--b')
    ax2.set_ylabel(r'$C_{mod}$ (counts)', fontsize=12)
    ax2.set_xlabel(r'$p(C_{mod})$', fontsize=12)
    ax2.set_xlim(ax2.get_xlim()[::-1]) # reverse x axis
    ax2.set_ylim(np.min(chiPixCmod), np.max(chiPixCmod))
    # NOTE(review): name is always None here, so the else branch below is dead
    # code (the figure is always shown, never saved)
    name = None
    if (name is None):
        plt.show()
    else:
        print 'saving plot to:', name
        plt.savefig(name, bbox_inches='tight')
def figCcomparison2v0(datafile, name = None, title=''):
#### read data VOLATILE: assumes SGall*dat files with the vectors below
v = np.loadtxt(datafile)
vNames = ['seta', 'sC1', 'sC2', 'sC3', 'SneffPSF', 'SneffModel', 'Schi2PSF', 'Schi2Model', 'sCmod', 'SbestA', \
'SbestARMS', 'SbestSig', 'SbestSigErr']
vNames = vNames + ['eta', 'C1', 'C2', 'C3', 'neffPSF', 'neffModel', 'chi2PSF', 'chi2Model', 'Cmod', 'bestA', \
'bestARMS', 'bestSig', 'bestSigErr']
def plotPanel(sX, sY, gX, gY, xMin, xMax, yMin, yMax, xLabel, yLabel, title=''):
## bin (astroML code)
# axes limits
range = np.zeros((2,2))
range[0,0]=xMin
range[0,1]=xMax
range[1,0]=yMin
range[1,1]=yMax
NS, xedgesS, yedgesS = binned_statistic_2d(sX, sY, sY,'count', bins=20, range=range)
NG, xedgesG, yedgesG = binned_statistic_2d(gX, gY, gY,'count', bins=20, range=range)
## plot
## galaxies are blue contours
levels = np.linspace(0, np.log10(NG.max()), 7)[2:]
# plt.contour(np.log10(NG.T), levels, colors='b', linewidths=2, extent=[xedgesG[0], xedgesG[-1], yedgesG[0], yedgesG[-1]])
plt.scatter(gX, gY, color='blue', s=5, linewidths=1, alpha=0.2)
# stars are copper continuous map
cmap = plt.cm.copper
cmap.set_bad('w', 0.0)
# plt.imshow(np.log10(NS.T), origin='lower', extent=[xedgesS[0], xedgesS[-1], yedgesS[0], yedgesS[-1]], aspect='auto', interpolation='nearest', cmap=cmap)
plt.scatter(sX, sY, color='red', s=10, linewidths=1, alpha=0.25)
plt.xlim(xMin, xMax)
plt.ylim(yMin, yMax)
plt.xlabel(xLabel, fontsize=16)
plt.ylabel(yLabel, fontsize=16)
xTitle = xMin + 0.05*(xMax-xMin)
yTitle = yMax + 0.05*(yMax-yMin)
ax.text(xTitle, yTitle, title)
#--------------------
# names for classification quantities (order as in the paper)
C1name = '$C_{SDSS}=m_{psf}-m_{mod}$'
C2name = '$C_{Sebok}$'
C3name = '$\Delta \chi^2 = \chi^2_{psf}-\chi^2_{mod}$'
C4name = '$C_{Bayes}$'
C5name = '$C_{spread}$'
C6name = '$\sigma \,\, (pixel)$'
# corresponding vectors (order not corresponding to the above for "historic" reasons)
# model
neffpsf = v[vNames.index('neffPSF')]
neffmod = v[vNames.index('neffModel')]
eta = v[vNames.index('eta')]
C1 = 2.5*np.log10(v[vNames.index('C2')])
C2 = v[vNames.index('C2')] * np.sqrt(neffpsf/neffmod)
C3 = v[vNames.index('C3')]
C4 = v[vNames.index('C1')]
C5 = eta*C2 - 1
C6 = v[vNames.index('bestSig')]
# psf
Sneffpsf = v[vNames.index('SneffPSF')]
Sneffmod = v[vNames.index('SneffModel')]
seta = v[vNames.index('seta')]
sC1 = 2.5*np.log10(v[vNames.index('sC2')])
sC2 = v[vNames.index('sC2')] * np.sqrt(Sneffpsf/Sneffmod)
sC3 = v[vNames.index('sC3')]
sC4 = v[vNames.index('sC1')]
sC5 = seta*sC2 - 1
sC6 = v[vNames.index('SbestSig')]
print '<eta>+-rms, min/max = ', np.median(eta), np.std(eta), np.min(eta), np.max(eta)
print '<Seta>+-rms, min/max = ', np.median(seta), np.std(seta), np.min(seta), np.max(seta)
print 'CBayes min/max:', np.min(C4), np.max(C4)
# axes limits
C1min = -0.05
C1max = 0.7
C2min = 0.99
C2max = 1.12
C3min = -2.0
C3max = 22.0
C4min = -5.0
C4max = 25.0
C5min = -0.01
C5max = 0.19
C6min = -0.05
C6max = 1.9
## Create figure and subplots
fig = plt.figure(figsize=(8, 8))
fig.subplots_adjust(wspace=0.25, hspace=0.25, left=0.1, right=0.95, bottom=0.12, top=0.95)
#
ax = plt.subplot(321)
xLabel = C1name
yLabel = C4name
xMin = C1min
xMax= C1max
yMin=C4min
yMax=C4max
plotPanel(sC1, sC4, C1, C4, xMin, xMax, yMin, yMax, xLabel, yLabel, title=title)
#
ax = plt.subplot(322)
xLabel = C1name
yLabel = C3name
xMin = C1min
xMax= C1max
yMin= C3min
yMax= C3max
plotPanel(sC1, sC3, C1, C3, xMin, xMax, yMin, yMax, xLabel, yLabel)
#
ax = plt.subplot(323)
xLabel = C1name
yLabel = C6name
xMin = C1min
xMax= C1max
yMin= C6min
yMax= C6max
plotPanel(sC1, sC6, C1, C6, xMin, xMax, yMin, yMax, xLabel, yLabel)
#
ax = plt.subplot(324)
xLabel = C4name
yLabel = C3name
xMin = C4min
xMax= C4max
yMin= C3min
yMax= C3max
plotPanel(sC4, sC3, C4, C3, xMin, xMax, yMin, yMax, xLabel, yLabel)
#
ax = plt.subplot(325)
xLabel = C4name
yLabel = C2name
xMin = C4min
xMax= C4max
yMin= C2min
yMax= C2max
plotPanel(sC4, sC2, C4, C2, xMin, xMax, yMin, yMax, xLabel, yLabel)
#
ax = plt.subplot(326)
xLabel = C4name
yLabel = C5name
xMin = C4min
xMax= C4max
yMin= C5min
yMax= C5max
plotPanel(sC4, sC5, C4, C5, xMin, xMax, yMin, yMax, xLabel, yLabel)
if (name is None):
plt.show()
else:
print 'saving plot to:', name
plt.savefig(name, bbox_inches='tight')
if (0):
Cmod = v[vNames.index('Cmod')]
bestA = v[vNames.index('bestA')]
sCmod = v[vNames.index('sCmod')]
SbestA = v[vNames.index('SbestA')]
return Cmod, bestA, sCmod, SbestA
def figCcomparison2(datafile, name = None, title=''):
#### read data VOLATILE: assumes SGall*dat files with the vectors below
v = np.loadtxt(datafile)
vNames = ['seta', 'sC1', 'sC2', 'sC3', 'SneffPSF', 'SneffModel', 'Schi2PSF', 'Schi2Model', 'sCmod', 'SbestA', \
'SbestARMS', 'SbestSig', 'SbestSigErr']
vNames = vNames + ['eta', 'C1', 'C2', 'C3', 'neffPSF', 'neffModel', 'chi2PSF', 'chi2Model', 'Cmod', 'bestA', \
'bestARMS', 'bestSig', 'bestSigErr']
def plotPanel(sX, sY, gX, gY, xMin, xMax, yMin, yMax, xLabel, yLabel, title=''):
## bin (astroML code)
# axes limits
range = np.zeros((2,2))
range[0,0]=xMin
range[0,1]=xMax
range[1,0]=yMin
range[1,1]=yMax
NS, xedgesS, yedgesS = binned_statistic_2d(sX, sY, sY,'count', bins=20, range=range)
NG, xedgesG, yedgesG = binned_statistic_2d(gX, gY, gY,'count', bins=20, range=range)
## plot
## galaxies are blue contours
levels = np.linspace(0, np.log10(NG.max()), 7)[2:]
# plt.contour(np.log10(NG.T), levels, colors='b', linewidths=2, extent=[xedgesG[0], xedgesG[-1], yedgesG[0], yedgesG[-1]])
plt.scatter(gX, gY, color='blue', s=5, linewidths=1, alpha=0.2)
# stars are copper continuous map
cmap = plt.cm.copper
cmap.set_bad('w', 0.0)
# plt.imshow(np.log10(NS.T), origin='lower', extent=[xedgesS[0], xedgesS[-1], yedgesS[0], yedgesS[-1]], aspect='auto', interpolation='nearest', cmap=cmap)
plt.scatter(sX, sY, color='red', s=10, linewidths=1, alpha=0.25)
plt.xlim(xMin, xMax)
plt.ylim(yMin, yMax)
plt.xlabel(xLabel, fontsize=16)
plt.ylabel(yLabel, fontsize=16)
xTitle = xMin + 0.05*(xMax-xMin)
yTitle = yMax + 0.05*(yMax-yMin)
ax.text(xTitle, yTitle, title)
#--------------------
# names for classification quantities (order as in the paper)
C1name = '$C_{SDSS}=m_{psf}-m_{mod}$'
C2name = '$C_{Sebok}$'
C3name = '$\Delta \chi^2 = \chi^2_{psf}-\chi^2_{mod}$'
C4name = '$C_{Bayes}$'
C5name = '$C_{spread}$'
C6name = '$\sigma \,\, (pixel)$'
# corresponding vectors (order not corresponding to the above for "historic" reasons)
# model
neffpsf = v[vNames.index('neffPSF')]
neffmod = v[vNames.index('neffModel')]
eta = v[vNames.index('eta')]
C1 = 2.5*np.log10(v[vNames.index('C2')])
C2 = v[vNames.index('C2')] * np.sqrt(neffpsf/neffmod)
C3 = v[vNames.index('C3')]
C4 = v[vNames.index('C1')]
C5 = eta*C2 - 1
C6 = v[vNames.index('bestSig')]
# psf
Sneffpsf = v[vNames.index('SneffPSF')]
Sneffmod = v[vNames.index('SneffModel')]
seta = v[vNames.index('seta')]
sC1 = 2.5*np.log10(v[vNames.index('sC2')])
sC2 = v[vNames.index('sC2')] * np.sqrt(Sneffpsf/Sneffmod)
sC3 = v[vNames.index('sC3')]
sC4 = v[vNames.index('sC1')]
sC5 = seta*sC2 - 1
sC6 = v[vNames.index('SbestSig')]
print '<eta>+-rms, min/max = ', np.median(eta), np.std(eta), np.min(eta), np.max(eta)
print '<Seta>+-rms, min/max = ', np.median(seta), np.std(seta), np.min(seta), np.max(seta)
print 'CBayes min/max:', np.min(C4), np.max(C4)
# axes limits
C1min = -0.05
C1max = 0.7
C2min = 0.99
C2max = 1.12
C3min = -2.0
C3max = 22.0
C4min = -5.0
C4max = 25.0
C5min = -0.01
C5max = 0.19
C6min = -0.05
C6max = 1.9
## Create figure and subplots
fig = plt.figure(figsize=(8, 8))
fig.subplots_adjust(wspace=0.23, hspace=0.28, left=0.1, right=0.95, bottom=0.12, top=0.95)
#
ax = plt.subplot(321)
xLabel = C1name
yLabel = C6name
xMin = C1min
xMax= C1max
yMin=C6min
yMax=C6max
plotPanel(sC1, sC6, C1, C6, xMin, xMax, yMin, yMax, xLabel, yLabel, title=title)
#
ax = plt.subplot(322)
xLabel = C3name
yLabel = C4name
xMin = C3min
xMax= C3max
yMin= C4min
yMax= C4max
plotPanel(sC3, sC4, C3, C4, xMin, xMax, yMin, yMax, xLabel, yLabel)
#
ax = plt.subplot(323)
xLabel = C1name
yLabel = C3name
xMin = C1min
xMax= C1max
yMin= C3min
yMax= C3max
plotPanel(sC1, sC3, C1, C3, xMin, xMax, yMin, yMax, xLabel, yLabel)
#
ax = plt.subplot(324)
xLabel = C3name
yLabel = C5name
xMin = C3min
xMax= C3max
yMin= C5min
yMax= C5max
plotPanel(sC3, sC5, C3, C5, xMin, xMax, yMin, yMax, xLabel, yLabel)
#
ax = plt.subplot(325)
xLabel = C1name
yLabel = C5name
xMin = C1min
xMax= C1max
yMin= C5min
yMax= C5max
plotPanel(sC1, sC5, C1, C5, xMin, xMax, yMin, yMax, xLabel, yLabel)
#
ax = plt.subplot(326)
xLabel = C2name
yLabel = C5name
xMin = C2min
xMax= C2max
yMin= C5min
yMax= C5max
plotPanel(sC2, sC5, C2, C5, xMin, xMax, yMin, yMax, xLabel, yLabel)
if (name is None):
plt.show()
else:
print 'saving plot to:', name
plt.savefig(name, bbox_inches='tight')
if (0):
Cmod = v[vNames.index('Cmod')]
bestA = v[vNames.index('bestA')]
sCmod = v[vNames.index('sCmod')]
SbestA = v[vNames.index('SbestA')]
return Cmod, bestA, sCmod, SbestA
def figClassification2(datafile, name = None, title=''):
    """Histogram and ROC (completeness vs. purity) panels for six
    star/galaxy classifiers read from an SGall*dat-style file.

    datafile -- whitespace-separated file readable by np.loadtxt
    name     -- output file name; the figure is shown interactively when None
    title    -- title placed on the first histogram panel
    """
    #### read data VOLATILE: assumes SGsimple*dat files with the vectors below
    v = np.loadtxt(datafile)
    # star ('s'/'S' prefixed) vectors followed by galaxy vectors, in file order
    vNames = ['seta', 'sC1', 'sC2', 'sC3', 'SneffPSF', 'SneffModel', 'Schi2PSF', 'Schi2Model', 'sCmod', 'SbestA', \
              'SbestARMS', 'SbestSig', 'SbestSigErr']
    vNames = vNames + ['eta', 'C1', 'C2', 'C3', 'neffPSF', 'neffModel', 'chi2PSF', 'chi2Model', 'Cmod', 'bestA', \
              'bestARMS', 'bestSig', 'bestSigErr']
    def plot2Chistograms(Cs, Cg, Xmin, Xmax, Ymin, Ymax, Xlabel, Ylabel, bins=20, title=''):
        # Overlaid normalized histograms: stars (Cs) red, galaxies (Cg) blue.
        # NOTE(review): draws on the *enclosing* `ax`, which is re-bound via
        # plt.subplot(...) before every call.
        limits = [(Xmin, Xmax, Ymin, Ymax)]
        labels = [Xlabel, Ylabel]
        ax.set_xlim(Xmin, Xmax)
        ax.set_ylim(Ymin, Ymax)
        ax.set_xlabel(Xlabel, fontsize=12)
        ax.set_ylabel(Ylabel, fontsize=12)
        plt.tick_params(axis='both', which='major', labelsize=15)
        # panel title just above the upper-left corner
        xTitle = Xmin + 0.05*(Xmax-Xmin)
        yTitle = Ymax + 0.05*(Ymax-Ymin)
        ax.text(xTitle, yTitle, title)
        # plot a histogram
        ax.hist(Cs, bins=bins, normed=True, facecolor='red', histtype='stepfilled', alpha=0.4)
        ax.hist(Cg, bins=bins, normed=True, facecolor='blue', histtype='stepfilled', alpha=0.4)
    def plotROC(Cs, Cg, Climit=50, title=''):
        # Completeness vs. purity curves: stars (threshold keeps C < cut) red,
        # galaxies (C > cut) blue.
        # NOTE(review): Xmin/Xmax are read from the *enclosing* scope, i.e. the
        # limits set for the most recent histogram panel — confirm intended.
        ax.set_xlim(Climit, 100)
        ax.set_ylim(Climit, 100)
        ax.set_xlabel('completeness (\%)', fontsize=12)
        ax.set_ylabel('purity (\%)', fontsize=12)
        plt.tick_params(axis='both', which='major', labelsize=15)
        # sweep the classification threshold over [Xmin, Xmax]
        Ngrid = 1000
        Cgrid = np.linspace(Xmin, Xmax, Ngrid)
        ComplS = 0*Cgrid
        PurityS = 0*Cgrid + 100.0
        ComplG = 0*Cgrid
        PurityG = 0*Cgrid + 100.0
        for i in range(0,Ngrid):
            C = Cgrid[i]
            CsOK = Cs[Cs < C]
            CgOK = Cg[Cg < C]
            if (CsOK.size>0):
                ComplS[i] = 100.0*CsOK.size / Cs.size
                PurityS[i] = 100.0*CsOK.size / (CsOK.size + CgOK.size)
            CsOK2 = Cs[Cs > C]
            CgOK2 = Cg[Cg > C]
            if (CgOK2.size>0):
                ComplG[i] = 100.0*CgOK2.size / Cg.size
                PurityG[i] = 100.0*CgOK2.size / (CsOK2.size + CgOK2.size)
        ax.plot(ComplS, PurityS, '-r', lw=3)
        ax.plot(ComplG, PurityG, '-b', lw=3)
        # reference grid every 10% (solid) and offset by 5% (dashed)
        for CL in [50, 60, 70, 80, 90]:
            ax.plot([CL, CL], [0.0, 100], '-k', lw=1, alpha=0.4)
            ax.plot([0, 100], [CL, CL], '-k', lw=1, alpha=0.4)
            ax.plot([CL+5, CL+5], [0.0, 100], '--k', lw=1, alpha=0.4)
            ax.plot([0, 100], [CL+5, CL+5], '--k', lw=1, alpha=0.4)
    #--------------------
    # names for classification quantities (order as in the paper)
    C1name = '$C_{SDSS}=m_{psf}-m_{mod}$'
    C2name = '$C_{Sebok}$'
    C3name = '$\Delta \chi^2 = \chi^2_{psf}-\chi^2_{mod}$'
    C4name = '$C_{Bayes}$'
    C5name = '$C_{spread}$'
    C6name = '$\sigma \,\, (pixel)$'
    # corresponding vectors (order not corresponding to the above for "historic" reasons)
    # model
    neffpsf = v[vNames.index('neffPSF')]
    neffmod = v[vNames.index('neffModel')]
    eta = v[vNames.index('eta')]
    C1 = 2.5*np.log10(v[vNames.index('C2')])
    C2 = v[vNames.index('C2')] * np.sqrt(neffpsf/neffmod)
    C3 = v[vNames.index('C3')]
    C4 = v[vNames.index('C1')]
    C5 = eta*C2 - 1
    C6 = v[vNames.index('bestSig')]
    # psf
    Sneffpsf = v[vNames.index('SneffPSF')]
    Sneffmod = v[vNames.index('SneffModel')]
    seta = v[vNames.index('seta')]
    sC1 = 2.5*np.log10(v[vNames.index('sC2')])
    sC2 = v[vNames.index('sC2')] * np.sqrt(Sneffpsf/Sneffmod)
    sC3 = v[vNames.index('sC3')]
    sC4 = v[vNames.index('sC1')]
    sC5 = seta*sC2 - 1
    sC6 = v[vNames.index('SbestSig')]
    print '<eta>+-rms, min/max = ', np.median(eta), np.std(eta), np.min(eta), np.max(eta)
    print '<Seta>+-rms, min/max = ', np.median(seta), np.std(seta), np.min(seta), np.max(seta)
    print 'CBayes min/max:', np.min(C4), np.max(C4)
    # axes limits
    C1min = -0.05
    C1max = 0.7
    C2min = 0.99
    C2max = 1.12
    C3min = -2.0
    C3max = 22.0
    C4min = -5.0
    C4max = 25.0
    C5min = -0.01
    C5max = 0.19
    C6min = -0.05
    C6max = 1.9
    ## Create figure and subplots
    fig = plt.figure(figsize=(8, 10))
    fig.subplots_adjust(wspace=0.27, hspace=0.51, left=0.12, right=0.94, bottom=0.05, top=0.95)
    Climit = 46
    bins = 40
    ## Cmod/Cpsf classification
    # plot histograms
    ax = plt.subplot(6,2,1)
    Xlabel = C1name
    Ylabel = '$n / (N\Delta_{bin})$'
    Xmin = C1min
    Xmax = C1max
    plot2Chistograms(sC1, C1, Xmin=Xmin, Xmax=Xmax, Ymin=0.0, Ymax=12.0, Xlabel=Xlabel, Ylabel=Ylabel, bins=bins, title=title)
    # plot ROC curves
    ax = plt.subplot(6,2,2)
    plotROC(sC1, C1, Climit=Climit, title='')
    ## Csebok classification
    # plot histograms
    ax = plt.subplot(6,2,3)
    Xlabel = C2name
    Ylabel = '$n / (N\Delta_{bin})$'
    Xmin = C2min
    Xmax = C2max
    plot2Chistograms(sC2, C2, Xmin=Xmin, Xmax=Xmax, Ymin=0.0, Ymax=51.0, Xlabel=Xlabel, Ylabel=Ylabel, bins=bins, title='')
    # plot ROC curves
    ax = plt.subplot(6,2,4)
    plotROC(sC2, C2, Climit=Climit, title='')
    ## chi2 classification
    # plot histograms
    ax = plt.subplot(6,2,5)
    Xlabel = C3name
    Ylabel = '$n / (N\Delta_{bin})$'
    Xmin = C3min
    Xmax = C3max
    plot2Chistograms(sC3, C3, Xmin=Xmin, Xmax=Xmax, Ymin=0.0, Ymax=0.5, Xlabel=Xlabel, Ylabel=Ylabel, bins=bins, title='')
    # plot ROC curves
    ax = plt.subplot(6,2,6)
    plotROC(sC3, C3, Climit=Climit, title='')
    ## Bayes classification
    # plot histograms
    ax = plt.subplot(6,2,7)
    Xlabel = C4name
    Ylabel = '$n / (N\Delta_{bin})$'
    Xmin = C4min
    Xmax = C4max
    plot2Chistograms(sC4, C4, Xmin=Xmin, Xmax=Xmax, Ymin=0.0, Ymax=0.5, Xlabel=Xlabel, Ylabel=Ylabel, bins=bins, title='')
    # plot ROC curves
    ax = plt.subplot(6,2,8)
    plotROC(sC4, C4, Climit=Climit, title='')
    ## spread_model classification
    # plot histograms
    ax = plt.subplot(6,2,9)
    Xlabel = C5name
    Ylabel = '$n / (N\Delta_{bin})$'
    Xmin = C5min
    Xmax = C5max
    plot2Chistograms(sC5, C5, Xmin=Xmin, Xmax=Xmax, Ymin=0.0, Ymax=50.0, Xlabel=Xlabel, Ylabel=Ylabel, bins=bins, title='')
    # plot ROC curves
    ax = plt.subplot(6,2,10)
    plotROC(sC5, C5, Climit=Climit, title='')
    ## best sigma classification
    # plot histograms
    ax = plt.subplot(6,2,11)
    Xlabel = C6name
    Ylabel = '$n / (N\Delta_{bin})$'
    Xmin = C6min
    Xmax = C6max
    plot2Chistograms(sC6, C6, Xmin=Xmin, Xmax=Xmax, Ymin=0.0, Ymax=5.0, Xlabel=Xlabel, Ylabel=Ylabel, bins=bins, title='')
    # plot ROC curves
    ax = plt.subplot(6,2,12)
    plotROC(sC6, C6, Climit=Climit, title='')
    if (name is None):
        plt.show()
    else:
        print 'saving plot to:', name
        plt.savefig(name, bbox_inches='tight')
def plotSNRplot1(SNR, CE1, CE2, CE3, Xmin=0, Xmax=100, Ymin=40, Ymax=110, name=None, title=''):
    """Plot completeness=efficiency (C=E, percent) vs. SNR for three classifiers.

    Parameters
    ----------
    SNR : array-like
        Signal-to-noise ratio values (shared x axis for all three curves).
    CE1, CE2, CE3 : array-like
        C=E curves; legend labels identify them as the SDSS psf-mod,
        Bayes and delta-chi2 classifiers.
    Xmin, Xmax, Ymin, Ymax : float
        Axis limits.
    name : str or None
        If None, show the figure interactively; otherwise save to this path.
    title : str
        Figure title.
    """
    # Hard-wired toggle: flip to 0 to suppress the legend label text.
    if (1):
        # 2, 4, 3
        C1name = '$C_{SDSS}=m_{psf}-m_{mod}$'
        C2name = '$C_{Bayes}$'
        # raw strings: \D and \c are not valid escape sequences, so the
        # values are unchanged but no SyntaxWarning is emitted on Python 3
        C3name = r'$\Delta \chi^2 = \chi^2_{psf}-\chi^2_{mod}$'
    else:
        C1name = ' '
        C2name = ' '
        C3name = ' '
    fig = plt.figure(figsize=(8, 8))
    fig.subplots_adjust(left=0.13, bottom=0.15, right=0.95, top=0.90, wspace=0.18, hspace=0.46)
    ##
    ax = fig.add_subplot(111)
    ax.set_title(title, fontsize=26)
    ax.set_xlim(Xmin, Xmax)
    ax.set_ylim(Ymin, Ymax)
    ax.set_xlabel(r'SNR', fontsize=26)
    ax.set_ylabel(r'C=E (\%)', fontsize=26)
    plt.tick_params(axis='both', which='major', labelsize=25)
    ax.plot(SNR, CE1, '-k', label=C1name, lw=4)
    ax.plot(SNR, CE2, '--b', label=C2name, lw=4)
    ax.plot(SNR, CE3, '--r', label=C3name, lw=4)
    # faint reference grid: solid lines every 10 units, dashed every 5
    for CL in [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]:
        ax.plot([CL, CL], [0.0, 110], '-k', lw=1, alpha=0.4)
        ax.plot([0, 100], [CL, CL], '-k', lw=1, alpha=0.4)
        ax.plot([CL-5, CL-5], [0.0, 110], '--k', lw=1, alpha=0.4)
        ax.plot([0, 100], [CL-5, CL-5], '--k', lw=1, alpha=0.4)
    if (name is None):
        plt.show()
    else:
        # Python 2-only "print x, y" replaced with a call form that produces
        # identical output on both Python 2 and Python 3
        print('saving plot to: %s' % name)
        plt.savefig(name, bbox_inches='tight')
def plotSNRplot2(SNR1, CE1, SNR2, CE2, SNR3, CE3, Xmin=0, Xmax=100, Ymin=40, Ymax=110, name=None, title=''):
    """Plot three completeness=efficiency (C=E, percent) curves vs. SNR.

    Unlike plotSNRplot1, each curve carries its own SNR abscissa and no
    legend labels are attached.

    Parameters
    ----------
    SNR1, CE1, SNR2, CE2, SNR3, CE3 : array-like
        Three (SNR, C=E) curve pairs, drawn solid black, dashed blue
        and dashed red respectively.
    Xmin, Xmax, Ymin, Ymax : float
        Axis limits.
    name : str or None
        If None, show the figure interactively; otherwise save to this path.
    title : str
        Figure title.
    """
    fig = plt.figure(figsize=(8, 8))
    fig.subplots_adjust(left=0.13, bottom=0.15, right=0.95, top=0.90, wspace=0.18, hspace=0.46)
    ##
    ax = fig.add_subplot(111)
    ax.set_title(title, fontsize=26)
    ax.set_xlim(Xmin, Xmax)
    ax.set_ylim(Ymin, Ymax)
    ax.set_xlabel(r'SNR', fontsize=26)
    ax.set_ylabel(r'C=E (\%)', fontsize=26)
    plt.tick_params(axis='both', which='major', labelsize=25)
    ax.plot(SNR1, CE1, '-k', lw=4)
    ax.plot(SNR2, CE2, '--b', lw=4)
    ax.plot(SNR3, CE3, '--r', lw=4)
    # faint reference grid: solid lines every 10 units, dashed every 5
    for CL in [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]:
        ax.plot([CL, CL], [0.0, 110], '-k', lw=1, alpha=0.4)
        ax.plot([0, 100], [CL, CL], '-k', lw=1, alpha=0.4)
        ax.plot([CL-5, CL-5], [0.0, 110], '--k', lw=1, alpha=0.4)
        ax.plot([0, 100], [CL-5, CL-5], '--k', lw=1, alpha=0.4)
    if (name is None):
        plt.show()
    else:
        # Python 2-only "print x, y" replaced with a call form that produces
        # identical output on both Python 2 and Python 3
        print('saving plot to: %s' % name)
        plt.savefig(name, bbox_inches='tight')
| 31.308231
| 156
| 0.600716
| 5,364
| 35,754
| 3.964019
| 0.08091
| 0.023703
| 0.040634
| 0.017307
| 0.876358
| 0.848987
| 0.832714
| 0.820016
| 0.781216
| 0.772986
| 0
| 0.071446
| 0.210802
| 35,754
| 1,141
| 157
| 31.33567
| 0.682107
| 0.102086
| 0
| 0.754057
| 0
| 0
| 0.109492
| 0.013475
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.006242
| null | null | 0.02372
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
438da00c53f36ee3acce887a6c8a04d2952d0297
| 228
|
py
|
Python
|
EmptyCell.py
|
edo1998/3d-blue-noise
|
b2528f609b26448aeae13af3005917a57cc47e5f
|
[
"CC0-1.0"
] | null | null | null |
EmptyCell.py
|
edo1998/3d-blue-noise
|
b2528f609b26448aeae13af3005917a57cc47e5f
|
[
"CC0-1.0"
] | null | null | null |
EmptyCell.py
|
edo1998/3d-blue-noise
|
b2528f609b26448aeae13af3005917a57cc47e5f
|
[
"CC0-1.0"
] | null | null | null |
class EmptyCell:
    """A degenerate cell that carries an id and a position but no geometry.

    All geometry accessors (vertices, face_vertices, neighbors) report
    None, marking the cell as empty.
    """

    def __init__(self, id, x):
        """Record the cell identifier and its position."""
        self.id = id
        self.x = x

    def vertices(self):
        """An empty cell exposes no vertices."""
        return

    def face_vertices(self):
        """An empty cell exposes no face vertices."""
        return

    def neighbors(self):
        """An empty cell has no neighbors."""
        return
| 17.538462
| 30
| 0.557018
| 29
| 228
| 4.206897
| 0.413793
| 0.245902
| 0.344262
| 0.360656
| 0.409836
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.359649
| 228
| 13
| 31
| 17.538462
| 0.835616
| 0
| 0
| 0.3
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0.3
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
43b6a1aba63d475f0cfdd2b291a962cca0965782
| 36,201
|
py
|
Python
|
scripts/build_esri_projection_mapping.py
|
captain-igloo/PROJ
|
10fd7871433899c819c932209db28e549c43a8c3
|
[
"MIT"
] | null | null | null |
scripts/build_esri_projection_mapping.py
|
captain-igloo/PROJ
|
10fd7871433899c819c932209db28e549c43a8c3
|
[
"MIT"
] | null | null | null |
scripts/build_esri_projection_mapping.py
|
captain-igloo/PROJ
|
10fd7871433899c819c932209db28e549c43a8c3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
###############################################################################
# $Id$
#
# Project: PROJ
# Purpose: Generate mappings between ESRI projection names and parameters and
# their EPSG equivalents.
# Author: Even Rouault <even.rouault at spatialys.com>
#
###############################################################################
# Copyright (c) 2018, Even Rouault <even.rouault at spatialys.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import yaml
# Map methods from pe_list_projection.csv to WKT2 naming
config_str = """
- Plate_Carree:
WKT2_name:
- EPSG_NAME_METHOD_EQUIDISTANT_CYLINDRICAL
- EPSG_NAME_METHOD_EQUIDISTANT_CYLINDRICAL_SPHERICAL
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
Cond:
- EPSG_NAME_PARAMETER_LATITUDE_1ST_STD_PARALLEL = 0
- Equidistant_Cylindrical:
WKT2_name: EPSG_NAME_METHOD_EQUIDISTANT_CYLINDRICAL
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Standard_Parallel_1: EPSG_NAME_PARAMETER_LATITUDE_1ST_STD_PARALLEL
- Miller_Cylindrical:
WKT2_name: PROJ_WKT2_NAME_METHOD_MILLER_CYLINDRICAL
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Mercator: # Mercator 2SP
WKT2_name: EPSG_NAME_METHOD_MERCATOR_VARIANT_B
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Standard_Parallel_1: EPSG_NAME_PARAMETER_LATITUDE_1ST_STD_PARALLEL
- Gauss_Kruger:
WKT2_name: EPSG_NAME_METHOD_TRANSVERSE_MERCATOR
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Scale_Factor: EPSG_NAME_PARAMETER_SCALE_FACTOR_AT_NATURAL_ORIGIN
- Latitude_Of_Origin: EPSG_NAME_PARAMETER_LATITUDE_OF_NATURAL_ORIGIN
- Transverse_Mercator:
WKT2_name: EPSG_NAME_METHOD_TRANSVERSE_MERCATOR
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Scale_Factor: EPSG_NAME_PARAMETER_SCALE_FACTOR_AT_NATURAL_ORIGIN
- Latitude_Of_Origin: EPSG_NAME_PARAMETER_LATITUDE_OF_NATURAL_ORIGIN
- Albers:
WKT2_name: EPSG_NAME_METHOD_ALBERS_EQUAL_AREA
Params:
- False_Easting: EPSG_NAME_PARAMETER_EASTING_FALSE_ORIGIN
- False_Northing: EPSG_NAME_PARAMETER_NORTHING_FALSE_ORIGIN
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_FALSE_ORIGIN
- Standard_Parallel_1: EPSG_NAME_PARAMETER_LATITUDE_1ST_STD_PARALLEL
- Standard_Parallel_2: EPSG_NAME_PARAMETER_LATITUDE_2ND_STD_PARALLEL
- Latitude_Of_Origin: EPSG_NAME_PARAMETER_LATITUDE_FALSE_ORIGIN
- Sinusoidal:
WKT2_name: PROJ_WKT2_NAME_METHOD_SINUSOIDAL
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Mollweide:
WKT2_name: PROJ_WKT2_NAME_METHOD_MOLLWEIDE
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Eckert_I:
WKT2_name: PROJ_WKT2_NAME_METHOD_ECKERT_I
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Eckert_II:
WKT2_name: PROJ_WKT2_NAME_METHOD_ECKERT_II
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Eckert_III:
WKT2_name: PROJ_WKT2_NAME_METHOD_ECKERT_III
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Eckert_IV:
WKT2_name: PROJ_WKT2_NAME_METHOD_ECKERT_IV
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Eckert_V:
WKT2_name: PROJ_WKT2_NAME_METHOD_ECKERT_V
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Eckert_VI:
WKT2_name: PROJ_WKT2_NAME_METHOD_ECKERT_VI
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Gall_Stereographic:
WKT2_name: PROJ_WKT2_NAME_METHOD_GALL_STEREOGRAPHIC
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Behrmann:
WKT2_name: EPSG_NAME_METHOD_LAMBERT_CYLINDRICAL_EQUAL_AREA_SPHERICAL
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian:
Name: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
Default: 0.0
- Standard_Parallel_1:
Name: EPSG_NAME_PARAMETER_LATITUDE_1ST_STD_PARALLEL
Default: 30.0
- Winkel_I:
WKT2_name: "Winkel I"
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Standard_Parallel_1: EPSG_NAME_PARAMETER_LATITUDE_1ST_STD_PARALLEL
- Winkel_II:
WKT2_name: "Winkel II"
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Standard_Parallel_1: EPSG_NAME_PARAMETER_LATITUDE_1ST_STD_PARALLEL
- Lambert_Conformal_Conic:
- WKT2_name: EPSG_NAME_METHOD_LAMBERT_CONIC_CONFORMAL_1SP
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Standard_Parallel_1: EPSG_NAME_PARAMETER_LATITUDE_OF_NATURAL_ORIGIN
- Scale_Factor: EPSG_NAME_PARAMETER_SCALE_FACTOR_AT_NATURAL_ORIGIN
- Latitude_Of_Origin: EPSG_NAME_PARAMETER_LATITUDE_OF_NATURAL_ORIGIN # should be the same as Standard_Parallel_1
- WKT2_name: EPSG_NAME_METHOD_LAMBERT_CONIC_CONFORMAL_2SP
Params:
- False_Easting: EPSG_NAME_PARAMETER_EASTING_FALSE_ORIGIN
- False_Northing: EPSG_NAME_PARAMETER_NORTHING_FALSE_ORIGIN
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_FALSE_ORIGIN
- Standard_Parallel_1: EPSG_NAME_PARAMETER_LATITUDE_1ST_STD_PARALLEL
- Standard_Parallel_2: EPSG_NAME_PARAMETER_LATITUDE_2ND_STD_PARALLEL
- Latitude_Of_Origin: EPSG_NAME_PARAMETER_LATITUDE_FALSE_ORIGIN
# From GDAL autotest
- WKT2_name: EPSG_NAME_METHOD_LAMBERT_CONIC_CONFORMAL_2SP
Params:
- False_Easting: EPSG_NAME_PARAMETER_EASTING_FALSE_ORIGIN
- False_Northing: EPSG_NAME_PARAMETER_NORTHING_FALSE_ORIGIN
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_FALSE_ORIGIN
- Standard_Parallel_1: EPSG_NAME_PARAMETER_LATITUDE_1ST_STD_PARALLEL
- Standard_Parallel_2: EPSG_NAME_PARAMETER_LATITUDE_2ND_STD_PARALLEL
- Scale_Factor: 1.0
- Latitude_Of_Origin: EPSG_NAME_PARAMETER_LATITUDE_FALSE_ORIGIN
# Tempative mapping. Did not find any example
- WKT2_name: EPSG_NAME_METHOD_LAMBERT_CONIC_CONFORMAL_2SP_MICHIGAN
Params:
- False_Easting: EPSG_NAME_PARAMETER_EASTING_FALSE_ORIGIN
- False_Northing: EPSG_NAME_PARAMETER_NORTHING_FALSE_ORIGIN
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_FALSE_ORIGIN
- Standard_Parallel_1: EPSG_NAME_PARAMETER_LATITUDE_1ST_STD_PARALLEL
- Standard_Parallel_2: EPSG_NAME_PARAMETER_LATITUDE_2ND_STD_PARALLEL
- Scale_Factor: EPSG_NAME_PARAMETER_ELLIPSOID_SCALE_FACTOR
- Latitude_Of_Origin: EPSG_NAME_PARAMETER_LATITUDE_FALSE_ORIGIN
- Polyconic:
WKT2_name: EPSG_NAME_METHOD_AMERICAN_POLYCONIC
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Latitude_Of_Origin: EPSG_NAME_PARAMETER_LATITUDE_OF_NATURAL_ORIGIN
- Quartic_Authalic:
WKT2_name: "Quartic Authalic"
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Loximuthal:
WKT2_name: "Loximuthal"
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Central_Parallel: EPSG_NAME_PARAMETER_LATITUDE_OF_NATURAL_ORIGIN
- Bonne:
WKT2_name: EPSG_NAME_METHOD_BONNE
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Standard_Parallel_1: EPSG_NAME_PARAMETER_LATITUDE_OF_NATURAL_ORIGIN
- Hotine_Oblique_Mercator_Two_Point_Natural_Origin:
WKT2_name: PROJ_WKT2_NAME_METHOD_HOTINE_OBLIQUE_MERCATOR_TWO_POINT_NATURAL_ORIGIN
Params:
- False_Easting: EPSG_NAME_PARAMETER_EASTING_PROJECTION_CENTRE
- False_Northing: EPSG_NAME_PARAMETER_NORTHING_PROJECTION_CENTRE
- Latitude_Of_1st_Point: "Latitude of 1st point"
- Latitude_Of_2nd_Point: "Latitude of 2nd point"
- Scale_Factor: EPSG_NAME_PARAMETER_SCALE_FACTOR_INITIAL_LINE
- Longitude_Of_1st_Point: "Longitude of 1st point"
- Longitude_Of_2nd_Point: "Longitude of 2nd point"
- Latitude_Of_Center: EPSG_NAME_PARAMETER_LATITUDE_PROJECTION_CENTRE
- Stereographic:
WKT2_name: PROJ_WKT2_NAME_METHOD_STEREOGRAPHIC
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Scale_Factor: EPSG_NAME_PARAMETER_SCALE_FACTOR_AT_NATURAL_ORIGIN
- Latitude_Of_Origin: EPSG_NAME_PARAMETER_LATITUDE_OF_NATURAL_ORIGIN
- Equidistant_Conic:
WKT2_name: PROJ_WKT2_NAME_METHOD_EQUIDISTANT_CONIC
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Standard_Parallel_1: EPSG_NAME_PARAMETER_LATITUDE_1ST_STD_PARALLEL
- Standard_Parallel_2: EPSG_NAME_PARAMETER_LATITUDE_2ND_STD_PARALLEL
- Latitude_Of_Origin: EPSG_NAME_PARAMETER_LATITUDE_OF_NATURAL_ORIGIN
- Cassini:
WKT2_name: EPSG_NAME_METHOD_CASSINI_SOLDNER
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Scale_Factor: 1.0 # fixed
- Latitude_Of_Origin: EPSG_NAME_PARAMETER_LATITUDE_OF_NATURAL_ORIGIN
- Van_der_Grinten_I:
WKT2_name: PROJ_WKT2_NAME_METHOD_VAN_DER_GRINTEN
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Robinson:
WKT2_name: PROJ_WKT2_NAME_METHOD_ROBINSON
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Two_Point_Equidistant:
WKT2_name: PROJ_WKT2_NAME_METHOD_TWO_POINT_EQUIDISTANT
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Latitude_Of_1st_Point: "Latitude of 1st point"
- Latitude_Of_2nd_Point: "Latitude of 2nd point"
- Longitude_Of_1st_Point: "Longitude of 1st point"
- Longitude_Of_2nd_Point: "Longitude of 2nd point"
- Azimuthal_Equidistant:
WKT2_name: EPSG_NAME_METHOD_MODIFIED_AZIMUTHAL_EQUIDISTANT
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Latitude_Of_Origin: EPSG_NAME_PARAMETER_LATITUDE_OF_NATURAL_ORIGIN
- Lambert_Azimuthal_Equal_Area:
WKT2_name: EPSG_NAME_METHOD_LAMBERT_AZIMUTHAL_EQUAL_AREA
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Latitude_Of_Origin: EPSG_NAME_PARAMETER_LATITUDE_OF_NATURAL_ORIGIN
- Cylindrical_Equal_Area:
WKT2_name: EPSG_NAME_METHOD_LAMBERT_CYLINDRICAL_EQUAL_AREA_SPHERICAL
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Standard_Parallel_1: EPSG_NAME_PARAMETER_LATITUDE_1ST_STD_PARALLEL
# No example in pe_list_projection.csv: temptative mapping !
- Hotine_Oblique_Mercator_Two_Point_Center:
WKT2_name: PROJ_WKT2_NAME_METHOD_HOTINE_OBLIQUE_MERCATOR_TWO_POINT_NATURAL_ORIGIN
Params:
- False_Easting: EPSG_NAME_PARAMETER_EASTING_PROJECTION_CENTRE
- False_Northing: EPSG_NAME_PARAMETER_NORTHING_PROJECTION_CENTRE
- Latitude_Of_1st_Point: "Latitude of 1st point"
- Latitude_Of_2nd_Point: "Latitude of 2nd point"
- Scale_Factor: EPSG_NAME_PARAMETER_SCALE_FACTOR_INITIAL_LINE
- Longitude_Of_1st_Point: "Longitude of 1st point"
- Longitude_Of_2nd_Point: "Longitude of 2nd point"
- Latitude_Of_Origin: EPSG_NAME_PARAMETER_LATITUDE_OF_NATURAL_ORIGIN
- Hotine_Oblique_Mercator_Azimuth_Natural_Origin:
WKT2_name: EPSG_NAME_METHOD_HOTINE_OBLIQUE_MERCATOR_VARIANT_A
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Scale_Factor: EPSG_NAME_PARAMETER_SCALE_FACTOR_INITIAL_LINE
- Azimuth: EPSG_NAME_PARAMETER_AZIMUTH_INITIAL_LINE
# No EPSG_NAME_PARAMETER_ANGLE_RECTIFIED_TO_SKEW_GRID
- Longitude_Of_Center: EPSG_NAME_PARAMETER_LONGITUDE_PROJECTION_CENTRE
- Latitude_Of_Center: EPSG_NAME_PARAMETER_LATITUDE_PROJECTION_CENTRE
- Hotine_Oblique_Mercator_Azimuth_Center:
WKT2_name: EPSG_NAME_METHOD_HOTINE_OBLIQUE_MERCATOR_VARIANT_B
Params:
- False_Easting: EPSG_NAME_PARAMETER_EASTING_PROJECTION_CENTRE
- False_Northing: EPSG_NAME_PARAMETER_NORTHING_PROJECTION_CENTRE
- Scale_Factor: EPSG_NAME_PARAMETER_SCALE_FACTOR_INITIAL_LINE
- Azimuth: EPSG_NAME_PARAMETER_AZIMUTH_INITIAL_LINE
# No EPSG_NAME_PARAMETER_ANGLE_RECTIFIED_TO_SKEW_GRID
- Longitude_Of_Center: EPSG_NAME_PARAMETER_LONGITUDE_PROJECTION_CENTRE
- Latitude_Of_Center: EPSG_NAME_PARAMETER_LATITUDE_PROJECTION_CENTRE
- Double_Stereographic:
WKT2_name: EPSG_NAME_METHOD_OBLIQUE_STEREOGRAPHIC
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Scale_Factor: EPSG_NAME_PARAMETER_SCALE_FACTOR_AT_NATURAL_ORIGIN
- Latitude_Of_Origin: EPSG_NAME_PARAMETER_LATITUDE_OF_NATURAL_ORIGIN
- Krovak:
- WKT2_name: EPSG_NAME_METHOD_KROVAK
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Pseudo_Standard_Parallel_1: EPSG_NAME_PARAMETER_LATITUDE_PSEUDO_STANDARD_PARALLEL
- Scale_Factor: EPSG_NAME_PARAMETER_SCALE_FACTOR_PSEUDO_STANDARD_PARALLEL
- Azimuth: EPSG_NAME_PARAMETER_COLATITUDE_CONE_AXIS
- Longitude_Of_Center: EPSG_NAME_PARAMETER_LONGITUDE_OF_ORIGIN
- Latitude_Of_Center: EPSG_NAME_PARAMETER_LATITUDE_PROJECTION_CENTRE
- X_Scale: 1.0
- Y_Scale: 1.0
- XY_Plane_Rotation: 0.0
- WKT2_name: EPSG_NAME_METHOD_KROVAK_NORTH_ORIENTED
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Pseudo_Standard_Parallel_1: EPSG_NAME_PARAMETER_LATITUDE_PSEUDO_STANDARD_PARALLEL
- Scale_Factor: EPSG_NAME_PARAMETER_SCALE_FACTOR_PSEUDO_STANDARD_PARALLEL
- Azimuth: EPSG_NAME_PARAMETER_COLATITUDE_CONE_AXIS
- Longitude_Of_Center: EPSG_NAME_PARAMETER_LONGITUDE_OF_ORIGIN
- Latitude_Of_Center: EPSG_NAME_PARAMETER_LATITUDE_PROJECTION_CENTRE
- X_Scale: -1.0
- Y_Scale: 1.0
- XY_Plane_Rotation: 90.0
- New_Zealand_Map_Grid:
WKT2_name: EPSG_NAME_METHOD_NZMG
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Longitude_Of_Origin: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Latitude_Of_Origin: EPSG_NAME_PARAMETER_LATITUDE_OF_NATURAL_ORIGIN
- Orthographic:
WKT2_name: EPSG_NAME_METHOD_ORTHOGRAPHIC
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Longitude_Of_Center: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Latitude_Of_Center: EPSG_NAME_PARAMETER_LATITUDE_OF_NATURAL_ORIGIN
- Winkel_Tripel:
WKT2_name: "Winkel Tripel"
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Standard_Parallel_1: EPSG_NAME_PARAMETER_LATITUDE_1ST_STD_PARALLEL
- Aitoff:
WKT2_name: "Aitoff"
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Flat_Polar_Quartic:
WKT2_name: PROJ_WKT2_NAME_METHOD_FLAT_POLAR_QUARTIC
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Craster_Parabolic:
WKT2_name: "Craster Parabolic"
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Gnomonic:
WKT2_name: PROJ_WKT2_NAME_METHOD_GNOMONIC
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Longitude_Of_Center: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Latitude_Of_Center: EPSG_NAME_PARAMETER_LATITUDE_OF_NATURAL_ORIGIN
- Times:
WKT2_name: PROJ_WKT2_NAME_METHOD_TIMES
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Vertical_Near_Side_Perspective:
WKT2_name: EPSG_NAME_METHOD_VERTICAL_PERSPECTIVE
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Longitude_Of_Center: EPSG_NAME_PARAMETER_LONGITUDE_TOPOGRAPHIC_ORIGIN
- Latitude_Of_Center: EPSG_NAME_PARAMETER_LATITUDE_TOPOGRAPHIC_ORIGIN
- Height: EPSG_NAME_PARAMETER_VIEWPOINT_HEIGHT
- Stereographic_North_Pole:
WKT2_name: EPSG_NAME_METHOD_POLAR_STEREOGRAPHIC_VARIANT_B
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_ORIGIN
- Standard_Parallel_1: EPSG_NAME_PARAMETER_LATITUDE_STD_PARALLEL
Cond:
- EPSG_NAME_PARAMETER_LATITUDE_STD_PARALLEL > 0
- Stereographic_South_Pole:
WKT2_name: EPSG_NAME_METHOD_POLAR_STEREOGRAPHIC_VARIANT_B
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_ORIGIN
- Standard_Parallel_1: EPSG_NAME_PARAMETER_LATITUDE_STD_PARALLEL
Cond:
- EPSG_NAME_PARAMETER_LATITUDE_STD_PARALLEL < 0
- Rectified_Skew_Orthomorphic_Natural_Origin:
WKT2_name: EPSG_NAME_METHOD_HOTINE_OBLIQUE_MERCATOR_VARIANT_A
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Scale_Factor: EPSG_NAME_PARAMETER_SCALE_FACTOR_INITIAL_LINE
- Azimuth: EPSG_NAME_PARAMETER_AZIMUTH_INITIAL_LINE
- Longitude_Of_Center: EPSG_NAME_PARAMETER_LONGITUDE_PROJECTION_CENTRE
- Latitude_Of_Center: EPSG_NAME_PARAMETER_LATITUDE_PROJECTION_CENTRE
- XY_Plane_Rotation: EPSG_NAME_PARAMETER_ANGLE_RECTIFIED_TO_SKEW_GRID
# temptative mapping: no example
- Rectified_Skew_Orthomorphic_Center:
WKT2_name: EPSG_NAME_METHOD_HOTINE_OBLIQUE_MERCATOR_VARIANT_B
Params:
- False_Easting: EPSG_NAME_PARAMETER_EASTING_PROJECTION_CENTRE
- False_Northing: EPSG_NAME_PARAMETER_NORTHING_PROJECTION_CENTRE
- Scale_Factor: EPSG_NAME_PARAMETER_SCALE_FACTOR_INITIAL_LINE
- Azimuth: EPSG_NAME_PARAMETER_AZIMUTH_INITIAL_LINE
- Longitude_Of_Center: EPSG_NAME_PARAMETER_LONGITUDE_PROJECTION_CENTRE
- Latitude_Of_Center: EPSG_NAME_PARAMETER_LATITUDE_PROJECTION_CENTRE
- XY_Plane_Rotation: EPSG_NAME_PARAMETER_ANGLE_RECTIFIED_TO_SKEW_GRID
- Goode_Homolosine:
WKT2_name: "Goode Homolosine"
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Equidistant_Cylindrical_Ellipsoidal:
WKT2_name: EPSG_NAME_METHOD_EQUIDISTANT_CYLINDRICAL
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Standard_Parallel_1: EPSG_NAME_PARAMETER_LATITUDE_1ST_STD_PARALLEL
- Laborde_Oblique_Mercator:
WKT2_name: EPSG_NAME_METHOD_LABORDE_OBLIQUE_MERCATOR
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Scale_Factor: EPSG_NAME_PARAMETER_SCALE_FACTOR_INITIAL_LINE
- Azimuth: EPSG_NAME_PARAMETER_AZIMUTH_INITIAL_LINE
- Longitude_Of_Center: EPSG_NAME_PARAMETER_LONGITUDE_PROJECTION_CENTRE
- Latitude_Of_Center: EPSG_NAME_PARAMETER_LATITUDE_PROJECTION_CENTRE
- Gnomonic_Ellipsoidal:
WKT2_name: PROJ_WKT2_NAME_METHOD_GNOMONIC
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Longitude_Of_Center: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Latitude_Of_Center: EPSG_NAME_PARAMETER_LATITUDE_OF_NATURAL_ORIGIN
- Wagner_IV:
WKT2_name: PROJ_WKT2_NAME_METHOD_WAGNER_IV
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Latitude_Of_Origin: 0
- Wagner_V:
WKT2_name: PROJ_WKT2_NAME_METHOD_WAGNER_V
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Wagner_VII:
WKT2_name: PROJ_WKT2_NAME_METHOD_WAGNER_VII
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Natural_Earth:
WKT2_name: PROJ_WKT2_NAME_METHOD_NATURAL_EARTH
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Natural_Earth_II:
WKT2_name: PROJ_WKT2_NAME_METHOD_NATURAL_EARTH_II
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Patterson:
WKT2_name: PROJ_WKT2_NAME_METHOD_PATTERSON
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Compact_Miller:
WKT2_name: PROJ_WKT2_NAME_METHOD_COMPACT_MILLER
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Geostationary_Satellite:
WKT2_name: PROJ_WKT2_NAME_METHOD_GEOSTATIONARY_SATELLITE_SWEEP_Y
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Longitude_Of_Center: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Height: "Satellite Height"
- Option: 0.0
- Mercator_Auxiliary_Sphere:
WKT2_name: EPSG_NAME_METHOD_POPULAR_VISUALISATION_PSEUDO_MERCATOR
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Standard_Parallel_1: EPSG_NAME_PARAMETER_LATITUDE_OF_NATURAL_ORIGIN
- Auxiliary_Sphere_Type: 0.0
- Mercator_Variant_A:
WKT2_name: EPSG_NAME_METHOD_MERCATOR_VARIANT_A
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Scale_Factor: EPSG_NAME_PARAMETER_SCALE_FACTOR_AT_NATURAL_ORIGIN
- Mercator_Variant_C:
WKT2_name: EPSG_NAME_METHOD_MERCATOR_VARIANT_B
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Standard_Parallel_1: EPSG_NAME_PARAMETER_LATITUDE_1ST_STD_PARALLEL
- Latitude_Of_Origin: 0
- Transverse_Cylindrical_Equal_Area:
WKT2_name: Transverse Cylindrical Equal Area
Params:
- False_Easting: EPSG_NAME_PARAMETER_FALSE_EASTING
- False_Northing: EPSG_NAME_PARAMETER_FALSE_NORTHING
- Central_Meridian: EPSG_NAME_PARAMETER_LONGITUDE_OF_NATURAL_ORIGIN
- Scale_Factor: EPSG_NAME_PARAMETER_SCALE_FACTOR_AT_NATURAL_ORIGIN
- Latitude_Of_Origin: EPSG_NAME_PARAMETER_LATITUDE_OF_NATURAL_ORIGIN
# Missing/unclear mappings
# Hammer_Aitoff: possibly hammer?
# Hammer_Ellipsoidal: possibly hammer?
# Eckert_Greifendorff: +proj=hammer +W=0.25 +M=1
# Tobler_Cylindrical_I: likely tobmerc, but parameters TBD
# Tobler_Cylindrical_II: likely tobmerc, but parameters TBD
# Missing mappings
# Transverse_Mercator_NGA_2014: utm -- tricky mapping from Central_Meridian to zone
# Polar_Stereographic_Variant_A: ups -- tricky mapping from Latitude_Of_Origin to "+south" when required
# Transverse Mercator: alias for Transverse_Mercator, as seen in ESRI:102470 - ESRI:102489
# The following methods are not currently possible in PROJ:
# Ney_Modified_Conic
# IGAC_Plano_Cartesiano
# Fuller
# Berghaus_Star
# Cube
# Transverse_Mercator_Complex
# Robinson_ARC_INFO
# Local
# Equidistant_Cylindrical_Auxiliary_Sphere
# Aspect_Adaptive_Cylindrical
# Mollweide_Auxiliary_Sphere
# Eckert_VI_Auxiliary_Sphere
# Eckert_IV_Auxiliary_Sphere
# Stereographic_Auxiliary_Sphere
# Van_der_Grinten_I_Auxiliary_Sphere
# Azimuthal_Equidistant_Auxiliary_Sphere
# Lambert_Azimuthal_Equal_Area_Auxiliary_Sphere
# Orthographic_Auxiliary_Sphere
# Gnomonic_Auxiliary_Sphere
# Polar_Stereographic_Variant_B
# Polar_Stereographic_Variant_C
# Quartic_Authalic_Ellipsoidal
# Adams_Square_II
# Peirce_Quincuncial
"""
# Parse the mapping table once at import time.  safe_load() is sufficient
# (the table contains only plain scalars, lists and mappings) and avoids
# both the arbitrary-object construction of the full loader and the
# PyYAML >= 5.1 warning for load() called without an explicit Loader.
config = yaml.safe_load(config_str)
# Accumulates [esri_name, wkt2_name, c_array_name] rows for the dispatch
# table emitted after all per-method arrays have been printed.
all_projs = []


def generate_mapping(WKT2_name, esri_proj_name, Params, suffix=''):
    """Print the C `ESRIParamMapping` array for one ESRI projection method.

    Emits the `static const ESRIParamMapping paramsESRI_<name><suffix>[]`
    initializer to stdout and registers one (ESRI name, WKT2 name, C array
    name) row per WKT2 method name in the module-level all_projs list.
    """
    c_name = 'paramsESRI_%s%s' % (esri_proj_name, suffix)
    # One ESRI method may map onto several candidate WKT2 methods.
    wkt2_candidates = WKT2_name if isinstance(WKT2_name, list) else [WKT2_name]
    for candidate in wkt2_candidates:
        all_projs.append([esri_proj_name, candidate, c_name])
    print('static const ESRIParamMapping %s[] = { ' % c_name)
    for param in Params:
        for param_name, param_value in param.items():
            default_value = None
            if isinstance(param_value, dict):
                # {Name: ..., Default: ...} form carries an explicit default
                default_value = param_value.get('Default', None)
                param_value = param_value['Name']
            has_default = "true" if default_value is not None else "false"
            numeric_default = default_value or 0.0
            if not isinstance(param_value, str):
                # fixed numeric value with no EPSG counterpart
                print(' { "%s", nullptr, 0, "%.1f", false },' % (param_name, param_value))
            elif param_value.startswith('EPSG_'):
                # paired NAME/CODE constants from the EPSG macro set
                code_constant = param_value.replace('_NAME_', '_CODE_')
                print(' { "%s", %s, %s, "%.1f", %s },' % (param_name, param_value, code_constant, numeric_default, has_default))
            else:
                # free-form parameter name, emitted as a quoted string
                print(' { "%s", "%s", 0, "%.1f", %s },' % (param_name, param_value, numeric_default, has_default))
    print(' { nullptr, nullptr, 0, "0.0", false }')
    print('};')
print('// This file was generated by scripts/build_esri_projection_mapping.py. DO NOT EDIT !')
print('')
print("""
/******************************************************************************
*
* Project: PROJ
* Purpose: Mappings between ESRI projection and parameters names and WKT2
* Author: Even Rouault <even dot rouault at spatialys dot com>
*
******************************************************************************
* Copyright (c) 2019, Even Rouault <even dot rouault at spatialys dot com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
****************************************************************************/
#ifndef FROM_COORDINATE_OPERATION_CPP
#error This file should only be included from coordinateoperation.cpp
#endif
#ifndef ESRI_PROJECTION_MAPPINGS_HH_INCLUDED
#define ESRI_PROJECTION_MAPPINGS_HH_INCLUDED
#include "coordinateoperation_internal.hpp"
//! @cond Doxygen_Suppress
// ---------------------------------------------------------------------------
// anonymous namespace
namespace {
using namespace ::NS_PROJ;
using namespace ::NS_PROJ::operation;
""")
for item in config:
for esri_proj_name in item:
proj_config = item[esri_proj_name]
if isinstance(proj_config, dict):
WKT2_name = proj_config['WKT2_name']
Params = proj_config['Params']
generate_mapping(WKT2_name, esri_proj_name, Params)
else:
count = 1
for subconfig in proj_config:
WKT2_name = subconfig['WKT2_name']
Params = subconfig['Params']
generate_mapping(WKT2_name, esri_proj_name, Params,
suffix='_alt%d' % count)
count += 1
print('')
print('static const ESRIMethodMapping esriMappings[] = {')
for esri_proj_name, WKT2_name, c_name in all_projs:
if WKT2_name.startswith('EPSG_'):
print(' { "%s", %s, %s, %s },' % (esri_proj_name, WKT2_name, WKT2_name.replace('_NAME_', '_CODE_'), c_name))
elif WKT2_name.startswith('PROJ_'):
print(' { "%s", %s, 0, %s },' % (esri_proj_name, WKT2_name, c_name))
else:
print(' { "%s", "%s", 0, %s },' % (esri_proj_name, WKT2_name, c_name))
print('};')
print("""
// ---------------------------------------------------------------------------
} // namespace {
//! @endcond
#endif // ESRI_PROJECTION_MAPPINGS_HH_INCLUDED
""")
| 43.773881
| 201
| 0.758653
| 4,425
| 36,201
| 5.615593
| 0.087684
| 0.108173
| 0.205924
| 0.113324
| 0.837901
| 0.816814
| 0.796451
| 0.76651
| 0.754236
| 0.737132
| 0
| 0.009177
| 0.178227
| 36,201
| 826
| 202
| 43.826877
| 0.826112
| 0.037761
| 0
| 0.602322
| 1
| 0
| 0.935405
| 0.490121
| 0
| 0
| 0
| 0
| 0
| 1
| 0.001451
| false
| 0
| 0.001451
| 0
| 0.002903
| 0.023222
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
43b6c163d546dadd1c42df5d54e0c79691b01fa8
| 140
|
py
|
Python
|
models/__init__.py
|
liuyixin-louis/DCP_py3
|
30faeb962db63dcba1b54a550e4035021cbb1be9
|
[
"BSD-3-Clause"
] | null | null | null |
models/__init__.py
|
liuyixin-louis/DCP_py3
|
30faeb962db63dcba1b54a550e4035021cbb1be9
|
[
"BSD-3-Clause"
] | null | null | null |
models/__init__.py
|
liuyixin-louis/DCP_py3
|
30faeb962db63dcba1b54a550e4035021cbb1be9
|
[
"BSD-3-Clause"
] | null | null | null |
from .preresnet import *
from .pruned_preresnet import *
from .resnet import *
from .pruned_preresnet import *
from .pruned_vgg import *
| 28
| 32
| 0.764286
| 18
| 140
| 5.777778
| 0.333333
| 0.384615
| 0.548077
| 0.480769
| 0.673077
| 0.673077
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164286
| 140
| 5
| 33
| 28
| 0.888889
| 0
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
43dc6c36756792a423d49f019ad84342aa609166
| 3,316
|
py
|
Python
|
bz2_compressed.py
|
ftondolo/Library-Of-Pybel
|
f1f2fe99c02fa4d3b57c440966422286721a5c2f
|
[
"MIT"
] | null | null | null |
bz2_compressed.py
|
ftondolo/Library-Of-Pybel
|
f1f2fe99c02fa4d3b57c440966422286721a5c2f
|
[
"MIT"
] | null | null | null |
bz2_compressed.py
|
ftondolo/Library-Of-Pybel
|
f1f2fe99c02fa4d3b57c440966422286721a5c2f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import bz2, base64
exec(bz2.decompress(base64.b64decode('QlpoOTFBWSZTWaktKskAAbdfgGAQev//93////6////6YAucfUAVQAAe7u60bWyoUUCQkKKoOEqUANAAAAAABoAAAAAAHDQyaaGmRoaZGQZGRoZAYmjJoAyZGIYSRJT9TU9I2oAAAAAaAB+qeoAAAACTSiTKaKfpR5Eeo8UeoNAAAAAGgA0yAOGhk00NMjQ0yMgyMjQyAxNGTQBkyMQwkSCNAgE0AAkxppNRkwJoDQMmE0NMTBJ086qqsPrfmMJjEJwP3aIJlKcC06MLgmBQpkalMYFJutznFwfoXAYDJsJcnQ3S07rgTs4tDOSfmMmUyk5LeKVp3h/pKE7xmRKvSrOrX+P94mN42XxBpr++7XTcIRyEnpMArAWRUiMYoxIIyQ37HA5bCy/CwYdop0+2B3bTvNGUMVOX4fcXVsb2/fFjtKXUYtk0Gb+zUeWNSmB+TWcSsDChBCINKl0xUGcrOrjRSvIMFdDFpKCkE+eSm6VeOuYqJMGMOWVjKX4/kqb83bWZU/9oSzYrhuu+YUL4GtRuuw14NhDGMrclFaLLc57b6GbrUsTU8yfAJYmT0vUNsWylJFFvahVY0i/dpeai1S474UzOClmTdbnKVHpQoLG4JMkF0baFZY7Sm2dnNjGfjXbVzrMwzA2TQZkiuhJO0mpkyhcFfVI/D4ruU2pdR1afe1Ih5yV/u/BwiQ8pwUO7eYEWe/LnONxwP80Jvk2tGnBD4ve9s5f+74XsoW2FtC2hbBF4M6Dh2kZWOKU5p02yupUaCFDAgMHeU2V2RpYlGjdTWCTl2M/Je2bQ5x0KFo/ME+0nM9AT5IGSNBnUh9EhYF8BeDIr3yVEPxrVJq9IY/jnvJkzZCFeXinvd0iLDmqkLCUg1XSkUZuYU90cYjzwY5Dj5nSTVLmpzn5yKnAZ3fM37GXVoYK3JMmymLecZRputBS+9nt7ixcZnw3VvxDQFtyska0ajS+2Abnjj27a2yrBkMMTG94TM4CSxUAYjCg2OmG88zEyZUY0rmlvjRjgnkkamlNmAP4DcQSMQg/QWRfy2x1IywI4cZms3k44DpJLfqLSe+Ph4I3ZS6UeVi3tgTYm0mxMYj3jyz9W08RJXyhI2yOZ63B3cuoWpaURN4ocg47Q94FvKEDMaZw29Z85f4/Lw7vQzkFEtXkQMmkoFxGcTq8TqnVUPSiLmDaVQwKGMc8/LI+K3XkV+eQsTJgsMSoewvuPXmJfAEWMN15YeFVBuhEgDzZ6f2zkz6HYXZ2nbHL3lKGNe7pO4seE98q3OcRJSEfQykyx7Xig5DM1CkK5FGY36lqCDo7zYaEgZ093h5+kjPSKinhC9BsNRzFiyLiLlxFIj3ns7fcLYanuGQ8OTIOi1fbzSuz5EVlpBrQ6lii6H2FC6894lfUICjz7yhoPQncVLoIpNnpcuNRNeim1WxSdGySIcgytGqG5Sl6GDmOc6DpNRrNg52iYjQBuN5eGoQkIG3A4nI+B0H7IEkhIPRlPcA8f+DvH8nfPAf0b+4qqqKqqqn2Eg/X8AJppsBnl55kkUDT6SCnPwQYCa2hBCS4EFdSJh4cU1RCOxZhBnQ1UqCcFMiKSIRlYB5y/jq/cI4pLeVUAj8gOQVrSbbjsSPqq2P5pChjUTdVsRSIIIKMqFlcT8jZsyS/LYNi+iFs1qs63C0EfVJLqOshhcV82NsbH2OEhgjT60wJ/aI+c44GsSX2BvySSMUUzdNEQLPWko8FnwOCFrKIM7wegI33H01NPERt+PoKug7KyspKkV/iFJtIYYMNxAwwCrXx3fLUVFRzp4XUMZJvNjZR/uQQN4Q3BEjO0IJwZJ2Ts4gOvwkCzBY5R8lqLGLAIod1QIDohIYQYClkMvSFIEmr7csersX+Y7g1DMMTTMl5drNBFTcWyM3EtKNRGRlCzkvFDBkalmHGK8aUciZwWi2SqKUlNblRJIwQEV3Z8l6JFAp
cVEFwjARtZLKbB5wwUBDFkOvigcu0depGHmjIV72mSDrWcWI0CY8QDY70HywhF+1Ja0pCWAsBGQcbBYGJIn5CS6dOECaYDaBtjGDDFKwYEKiR1pVFSAOJAvWmj/QmC6X+eVyXAQtp1ICGhjSRgXX+kUEQeAtgiaskZDaE6jtMggyAiLGQSQdmIgA+vmEes0rmeFxoSJit2IOkTCEKYc6DnBFrhehnQ2mMbPIhEFEjUAxIuFycPY5A0kZjFirGDSOoLzvcj88oPp7xZrUkaubW0WWlGQ1nMfBCzxYYkpVevFw14AboZMsGlUGSCXrD23NyKMiYOnLySy+VoM5aHtvOZFvS2NpNjXSkeBD8BcgWXYulrcZiYG04jJIY/FnRAqiWtuxiwCvN7Jo1CaBpMbBjQ00faFOfTokl4a6UOlnqP4V8c62GzyBGJAdRLCVTlWw1gMCpzC3GNm7BtM5EjAvRYa93uBLidAL2LlTAhIVjpvgLkItGoUq7E0BqTrL0i5iMGMYzwSb2QoWQRgOhvQzIGA1Zl0gkshGBqTXfiSTMdJCiR74qBMvYJDYAppDJUz7HmM7QGEJL7Vzl2iR3Bj+oFRIW8QejT+wrI5jxIOCuA85LmF5eV7m02Nq71eCuOpnx+JewVFxVDq6QkHKLZeVakvHVCho9cyrL0cyxDryY16MDuCosQtA9hDfDgEC8ZHHZNaiIT2SyYWREkPJEjEpwwYw1BGAuiHHr54TmCWHQCJpJFZUJIxQXoCauAYGoa+UaVxcksEgvQfJzmlOcF0LVJL1DTTRsDu/QUI3Nsb6vTs19HZqFdxVMpGUKKSOYxnJKV7umQc0XsJnrCxEi1tkwwLoLjIkYl0WaJlSCKoaRAEZ4nMdleQshY+rnQaqqq9OE6nsMOCDimIQ+kwESoLuopDOsHhjssdE9qAaB6mKSa7VlilAkjQxUDlGcm5tm+6wmrMQUo80U1VOzoj3EUKtBq2VKsvbcMhjYSMllOTJBROiSF2Eh3GjWkwFsxCwMEGGqQy5uYSCBlySmCgokBRUCqykBYL7YvlJx9sNpyodNhzc0WtzzidoREhhETBpd6N65UiSFmMRzzgPA8+W9INaob3knwAA/omkNw51SOAmMFIXA05jHz6yomMQxit6wKgqeFgXye1YYr2DBcAQVFxyYLr1nQuD5+mXIHaLrzVwq0CQ0I7n7RNe4xFXGY++AJiX3NJC9guchi3GwkDRyOBgpigBXdM2cnoFKENxDxP+LuSKcKEhUlpVkg')))
| 829
| 3,274
| 0.969843
| 82
| 3,316
| 39.219512
| 0.97561
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.127795
| 0.001809
| 3,316
| 3
| 3,275
| 1,105.333333
| 0.843807
| 0.006031
| 0
| 0
| 0
| 0.5
| 0.98088
| 0.98088
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 10
|
78e1f949149c6eb886a4aba4e5799d94f030b369
| 859,919
|
py
|
Python
|
kubernetes/client/api/cluster_xk8s_io_v1alpha4_api.py
|
mariusgheorghies/python
|
68ac7e168963d8b5a81dc493b1973d29e903a15b
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/api/cluster_xk8s_io_v1alpha4_api.py
|
mariusgheorghies/python
|
68ac7e168963d8b5a81dc493b1973d29e903a15b
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/api/cluster_xk8s_io_v1alpha4_api.py
|
mariusgheorghies/python
|
68ac7e168963d8b5a81dc493b1973d29e903a15b
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from kubernetes.client.api_client import ApiClient
from kubernetes.client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class ClusterXK8sIoV1alpha4Api(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_namespaced_cluster(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_cluster # noqa: E501
create a Cluster # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_cluster(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param IoXK8sClusterV1alpha4Cluster body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoXK8sClusterV1alpha4Cluster
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_namespaced_cluster_with_http_info(namespace, body, **kwargs) # noqa: E501
def create_namespaced_cluster_with_http_info(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_cluster # noqa: E501
create a Cluster # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_cluster_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param IoXK8sClusterV1alpha4Cluster body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoXK8sClusterV1alpha4Cluster, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_cluster" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_cluster`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_cluster`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/clusters', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoXK8sClusterV1alpha4Cluster', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def create_namespaced_cluster_class(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_cluster_class # noqa: E501
create a ClusterClass # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_cluster_class(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param IoXK8sClusterV1alpha4ClusterClass body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoXK8sClusterV1alpha4ClusterClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_namespaced_cluster_class_with_http_info(namespace, body, **kwargs) # noqa: E501
def create_namespaced_cluster_class_with_http_info(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_cluster_class # noqa: E501
create a ClusterClass # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_cluster_class_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param IoXK8sClusterV1alpha4ClusterClass body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoXK8sClusterV1alpha4ClusterClass, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_cluster_class" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_cluster_class`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_cluster_class`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/clusterclasses', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoXK8sClusterV1alpha4ClusterClass', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def create_namespaced_machine(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_machine # noqa: E501
create a Machine # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_machine(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param IoXK8sClusterV1alpha4Machine body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoXK8sClusterV1alpha4Machine
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_namespaced_machine_with_http_info(namespace, body, **kwargs) # noqa: E501
def create_namespaced_machine_with_http_info(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_machine # noqa: E501
create a Machine # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_machine_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param IoXK8sClusterV1alpha4Machine body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoXK8sClusterV1alpha4Machine, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_machine" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_machine`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_machine`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machines', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoXK8sClusterV1alpha4Machine', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def create_namespaced_machine_deployment(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_machine_deployment # noqa: E501
create a MachineDeployment # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_machine_deployment(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param IoXK8sClusterV1alpha4MachineDeployment body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoXK8sClusterV1alpha4MachineDeployment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_namespaced_machine_deployment_with_http_info(namespace, body, **kwargs) # noqa: E501
def create_namespaced_machine_deployment_with_http_info(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_machine_deployment # noqa: E501
create a MachineDeployment # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_machine_deployment_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param IoXK8sClusterV1alpha4MachineDeployment body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoXK8sClusterV1alpha4MachineDeployment, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_machine_deployment" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_machine_deployment`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_machine_deployment`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinedeployments', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoXK8sClusterV1alpha4MachineDeployment', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def create_namespaced_machine_health_check(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_machine_health_check # noqa: E501
create a MachineHealthCheck # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_machine_health_check(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param IoXK8sClusterV1alpha4MachineHealthCheck body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoXK8sClusterV1alpha4MachineHealthCheck
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_namespaced_machine_health_check_with_http_info(namespace, body, **kwargs) # noqa: E501
def create_namespaced_machine_health_check_with_http_info(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_machine_health_check # noqa: E501
create a MachineHealthCheck # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_machine_health_check_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param IoXK8sClusterV1alpha4MachineHealthCheck body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoXK8sClusterV1alpha4MachineHealthCheck, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_machine_health_check" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_machine_health_check`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_machine_health_check`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinehealthchecks', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoXK8sClusterV1alpha4MachineHealthCheck', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def create_namespaced_machine_pool(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_machine_pool # noqa: E501
create a MachinePool # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_machine_pool(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param IoXK8sClusterV1alpha4MachinePool body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoXK8sClusterV1alpha4MachinePool
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_namespaced_machine_pool_with_http_info(namespace, body, **kwargs) # noqa: E501
def create_namespaced_machine_pool_with_http_info(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_machine_pool # noqa: E501
create a MachinePool # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_machine_pool_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param IoXK8sClusterV1alpha4MachinePool body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoXK8sClusterV1alpha4MachinePool, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_machine_pool" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_machine_pool`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_machine_pool`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinepools', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoXK8sClusterV1alpha4MachinePool', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def create_namespaced_machine_set(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_machine_set # noqa: E501
create a MachineSet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_machine_set(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param IoXK8sClusterV1alpha4MachineSet body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoXK8sClusterV1alpha4MachineSet
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_namespaced_machine_set_with_http_info(namespace, body, **kwargs) # noqa: E501
def create_namespaced_machine_set_with_http_info(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_machine_set # noqa: E501
create a MachineSet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_machine_set_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param IoXK8sClusterV1alpha4MachineSet body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoXK8sClusterV1alpha4MachineSet, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_machine_set" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_machine_set`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_machine_set`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinesets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoXK8sClusterV1alpha4MachineSet', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_namespaced_cluster(self, namespace, **kwargs): # noqa: E501
"""delete_collection_namespaced_cluster # noqa: E501
delete collection of Cluster # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_cluster(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1StatusV2
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_collection_namespaced_cluster_with_http_info(namespace, **kwargs) # noqa: E501
def delete_collection_namespaced_cluster_with_http_info(self, namespace, **kwargs):  # noqa: E501
    """Delete a collection of Cluster objects (raw-response variant).

    Issues a DELETE against the namespaced Cluster collection endpoint and
    returns the deserialized body together with the HTTP status code and
    headers. Synchronous by default; pass async_req=True for a thread
    handle instead.

    >>> thread = api.delete_collection_namespaced_cluster_with_http_info(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: continue token from a previous chunked list call.
    :param str field_selector: selector restricting results by object fields.
    :param str label_selector: selector restricting results by object labels.
    :param int limit: maximum number of responses for a chunked list call.
    :param str resource_version: resourceVersion constraint for the request.
    :param str resource_version_match: how resourceVersion is applied to the call.
    :param int timeout_seconds: timeout for the list/watch call.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connect, read) tuple.
    :return: tuple(V1StatusV2, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Every keyword this endpoint understands, including the generic
    # transport switches shared by all generated API methods.
    recognized = {
        'namespace',
        'pretty',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'resource_version',
        'resource_version_match',
        'timeout_seconds',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    }
    for key in kwargs:
        if key not in recognized:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_collection_namespaced_cluster" % key
            )
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and namespace is None:
        raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_cluster`")  # noqa: E501

    # Map python argument names onto their wire-format query keys; only
    # parameters the caller actually supplied (and that are not None) are
    # serialized, in the endpoint's canonical order.
    wire_names = (
        ('pretty', 'pretty'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('timeout_seconds', 'timeoutSeconds'),
    )
    query_params = [
        (wire, kwargs[py])
        for py, wire in wire_names
        if kwargs.get(py) is not None
    ]

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),
    }
    # Authentication setting: bearer token, as for all cluster.x-k8s.io calls.
    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/clusters', 'DELETE',
        {'namespace': namespace},
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1StatusV2',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def delete_collection_namespaced_cluster_class(self, namespace, **kwargs):  # noqa: E501
    """Delete a collection of ClusterClass objects.

    Convenience wrapper around
    delete_collection_namespaced_cluster_class_with_http_info that returns
    only the deserialized response body. Synchronous by default; pass
    async_req=True to get a thread handle instead.

    >>> thread = api.delete_collection_namespaced_cluster_class(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: continue token from a previous chunked list call.
    :param str field_selector: selector restricting results by object fields.
    :param str label_selector: selector restricting results by object labels.
    :param int limit: maximum number of responses for a chunked list call.
    :param str resource_version: resourceVersion constraint for the request.
    :param str resource_version_match: how resourceVersion is applied to the call.
    :param int timeout_seconds: timeout for the list/watch call.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connect, read) tuple.
    :return: V1StatusV2
             If the method is called asynchronously,
             returns the request thread.
    """
    # Force data-only mode so callers receive the body, not the
    # (data, status, headers) tuple produced by the raw variant.
    opts = dict(kwargs, _return_http_data_only=True)
    return self.delete_collection_namespaced_cluster_class_with_http_info(namespace, **opts)  # noqa: E501
def delete_collection_namespaced_cluster_class_with_http_info(self, namespace, **kwargs):  # noqa: E501
    """Delete a collection of ClusterClass objects (raw-response variant).

    Issues a DELETE against the namespaced ClusterClass collection endpoint
    and returns the deserialized body together with the HTTP status code
    and headers. Synchronous by default; pass async_req=True for a thread
    handle instead.

    >>> thread = api.delete_collection_namespaced_cluster_class_with_http_info(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: continue token from a previous chunked list call.
    :param str field_selector: selector restricting results by object fields.
    :param str label_selector: selector restricting results by object labels.
    :param int limit: maximum number of responses for a chunked list call.
    :param str resource_version: resourceVersion constraint for the request.
    :param str resource_version_match: how resourceVersion is applied to the call.
    :param int timeout_seconds: timeout for the list/watch call.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connect, read) tuple.
    :return: tuple(V1StatusV2, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Every keyword this endpoint understands, including the generic
    # transport switches shared by all generated API methods.
    recognized = {
        'namespace',
        'pretty',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'resource_version',
        'resource_version_match',
        'timeout_seconds',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    }
    for key in kwargs:
        if key not in recognized:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_collection_namespaced_cluster_class" % key
            )
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and namespace is None:
        raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_cluster_class`")  # noqa: E501

    # Map python argument names onto their wire-format query keys; only
    # parameters the caller actually supplied (and that are not None) are
    # serialized, in the endpoint's canonical order.
    wire_names = (
        ('pretty', 'pretty'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('timeout_seconds', 'timeoutSeconds'),
    )
    query_params = [
        (wire, kwargs[py])
        for py, wire in wire_names
        if kwargs.get(py) is not None
    ]

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),
    }
    # Authentication setting: bearer token, as for all cluster.x-k8s.io calls.
    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/clusterclasses', 'DELETE',
        {'namespace': namespace},
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1StatusV2',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def delete_collection_namespaced_machine(self, namespace, **kwargs):  # noqa: E501
    """Delete a collection of Machine objects.

    Convenience wrapper around
    delete_collection_namespaced_machine_with_http_info that returns only
    the deserialized response body. Synchronous by default; pass
    async_req=True to get a thread handle instead.

    >>> thread = api.delete_collection_namespaced_machine(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: continue token from a previous chunked list call.
    :param str field_selector: selector restricting results by object fields.
    :param str label_selector: selector restricting results by object labels.
    :param int limit: maximum number of responses for a chunked list call.
    :param str resource_version: resourceVersion constraint for the request.
    :param str resource_version_match: how resourceVersion is applied to the call.
    :param int timeout_seconds: timeout for the list/watch call.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connect, read) tuple.
    :return: V1StatusV2
             If the method is called asynchronously,
             returns the request thread.
    """
    # Force data-only mode so callers receive the body, not the
    # (data, status, headers) tuple produced by the raw variant.
    opts = dict(kwargs, _return_http_data_only=True)
    return self.delete_collection_namespaced_machine_with_http_info(namespace, **opts)  # noqa: E501
def delete_collection_namespaced_machine_with_http_info(self, namespace, **kwargs):  # noqa: E501
    """Delete a collection of Machine objects (raw-response variant).

    Issues a DELETE against the namespaced Machine collection endpoint and
    returns the deserialized body together with the HTTP status code and
    headers. Synchronous by default; pass async_req=True for a thread
    handle instead.

    >>> thread = api.delete_collection_namespaced_machine_with_http_info(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: continue token from a previous chunked list call.
    :param str field_selector: selector restricting results by object fields.
    :param str label_selector: selector restricting results by object labels.
    :param int limit: maximum number of responses for a chunked list call.
    :param str resource_version: resourceVersion constraint for the request.
    :param str resource_version_match: how resourceVersion is applied to the call.
    :param int timeout_seconds: timeout for the list/watch call.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connect, read) tuple.
    :return: tuple(V1StatusV2, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Every keyword this endpoint understands, including the generic
    # transport switches shared by all generated API methods.
    recognized = {
        'namespace',
        'pretty',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'resource_version',
        'resource_version_match',
        'timeout_seconds',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    }
    for key in kwargs:
        if key not in recognized:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_collection_namespaced_machine" % key
            )
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and namespace is None:
        raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_machine`")  # noqa: E501

    # Map python argument names onto their wire-format query keys; only
    # parameters the caller actually supplied (and that are not None) are
    # serialized, in the endpoint's canonical order.
    wire_names = (
        ('pretty', 'pretty'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('timeout_seconds', 'timeoutSeconds'),
    )
    query_params = [
        (wire, kwargs[py])
        for py, wire in wire_names
        if kwargs.get(py) is not None
    ]

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),
    }
    # Authentication setting: bearer token, as for all cluster.x-k8s.io calls.
    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machines', 'DELETE',
        {'namespace': namespace},
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1StatusV2',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def delete_collection_namespaced_machine_deployment(self, namespace, **kwargs):  # noqa: E501
    """Delete a collection of MachineDeployment objects in a namespace.

    Synchronous by default; pass async_req=True for an asynchronous request:
    >>> thread = api.delete_collection_namespaced_machine_deployment(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: opaque continuation token from a previous list call, used to fetch the next page of results; the server may reject a token it no longer recognizes (410 ResourceExpired).
    :param str field_selector: restrict the returned objects by their fields. Defaults to everything.
    :param str label_selector: restrict the returned objects by their labels. Defaults to everything.
    :param int limit: maximum number of responses to return for a list call; when more items exist the server sets `continue` on the list metadata.
    :param str resource_version: constrains which resource versions the request may be served from. Defaults to unset.
    :param str resource_version_match: determines how resourceVersion is applied to list calls. Defaults to unset.
    :param int timeout_seconds: timeout for the list/watch call, regardless of activity or inactivity.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding. Default is True.
    :param _request_timeout: total request timeout (number) or a (connection, read) timeout tuple.
    :return: V1StatusV2
             If the method is called asynchronously, returns the request thread.
    """
    # Ask the *_with_http_info variant for the deserialized body only,
    # not the (data, status, headers) tuple.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.delete_collection_namespaced_machine_deployment_with_http_info(namespace, **forwarded)  # noqa: E501
def delete_collection_namespaced_machine_deployment_with_http_info(self, namespace, **kwargs):  # noqa: E501
    """Delete a collection of MachineDeployment objects in a namespace.

    Synchronous by default; pass async_req=True for an asynchronous request:
    >>> thread = api.delete_collection_namespaced_machine_deployment_with_http_info(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: opaque continuation token from a previous list call, used to fetch the next page of results; the server may reject a token it no longer recognizes (410 ResourceExpired).
    :param str field_selector: restrict the returned objects by their fields. Defaults to everything.
    :param str label_selector: restrict the returned objects by their labels. Defaults to everything.
    :param int limit: maximum number of responses to return for a list call; when more items exist the server sets `continue` on the list metadata.
    :param str resource_version: constrains which resource versions the request may be served from. Defaults to unset.
    :param str resource_version_match: determines how resourceVersion is applied to list calls. Defaults to unset.
    :param int timeout_seconds: timeout for the list/watch call, regardless of activity or inactivity.
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding. Default is True.
    :param _request_timeout: total request timeout (number) or a (connection, read) timeout tuple.
    :return: tuple(V1StatusV2, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously, returns the request thread.
    """
    # Keyword arguments the caller may legally supply to this endpoint.
    recognized = [
        'namespace',
        'pretty',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'resource_version',
        'resource_version_match',
        'timeout_seconds',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]
    params = {'namespace': namespace}
    for key, val in six.iteritems(kwargs):
        if key not in recognized:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_collection_namespaced_machine_deployment" % key
            )
        params[key] = val
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and params.get('namespace') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_machine_deployment`")  # noqa: E501
    path_params = {'namespace': params['namespace']}
    # Map python argument names onto their wire-format query keys, keeping
    # the original emission order; unset (None/absent) values are skipped.
    wire_names = [
        ('pretty', 'pretty'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('timeout_seconds', 'timeoutSeconds'),
    ]
    query_params = [(wire, params[py])
                    for py, wire in wire_names
                    if params.get(py) is not None]
    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),  # noqa: E501
    }
    # Authentication setting: BearerToken only.
    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinedeployments', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1StatusV2',  # noqa: E501
        auth_settings=['BearerToken'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def delete_collection_namespaced_machine_health_check(self, namespace, **kwargs):  # noqa: E501
    """Delete a collection of MachineHealthCheck objects in a namespace.

    Synchronous by default; pass async_req=True for an asynchronous request:
    >>> thread = api.delete_collection_namespaced_machine_health_check(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: opaque continuation token from a previous list call, used to fetch the next page of results; the server may reject a token it no longer recognizes (410 ResourceExpired).
    :param str field_selector: restrict the returned objects by their fields. Defaults to everything.
    :param str label_selector: restrict the returned objects by their labels. Defaults to everything.
    :param int limit: maximum number of responses to return for a list call; when more items exist the server sets `continue` on the list metadata.
    :param str resource_version: constrains which resource versions the request may be served from. Defaults to unset.
    :param str resource_version_match: determines how resourceVersion is applied to list calls. Defaults to unset.
    :param int timeout_seconds: timeout for the list/watch call, regardless of activity or inactivity.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding. Default is True.
    :param _request_timeout: total request timeout (number) or a (connection, read) timeout tuple.
    :return: V1StatusV2
             If the method is called asynchronously, returns the request thread.
    """
    # Ask the *_with_http_info variant for the deserialized body only,
    # not the (data, status, headers) tuple.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.delete_collection_namespaced_machine_health_check_with_http_info(namespace, **forwarded)  # noqa: E501
def delete_collection_namespaced_machine_health_check_with_http_info(self, namespace, **kwargs):  # noqa: E501
    """Delete a collection of MachineHealthCheck objects in a namespace.

    Synchronous by default; pass async_req=True for an asynchronous request:
    >>> thread = api.delete_collection_namespaced_machine_health_check_with_http_info(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: opaque continuation token from a previous list call, used to fetch the next page of results; the server may reject a token it no longer recognizes (410 ResourceExpired).
    :param str field_selector: restrict the returned objects by their fields. Defaults to everything.
    :param str label_selector: restrict the returned objects by their labels. Defaults to everything.
    :param int limit: maximum number of responses to return for a list call; when more items exist the server sets `continue` on the list metadata.
    :param str resource_version: constrains which resource versions the request may be served from. Defaults to unset.
    :param str resource_version_match: determines how resourceVersion is applied to list calls. Defaults to unset.
    :param int timeout_seconds: timeout for the list/watch call, regardless of activity or inactivity.
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding. Default is True.
    :param _request_timeout: total request timeout (number) or a (connection, read) timeout tuple.
    :return: tuple(V1StatusV2, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously, returns the request thread.
    """
    # Keyword arguments the caller may legally supply to this endpoint.
    recognized = [
        'namespace',
        'pretty',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'resource_version',
        'resource_version_match',
        'timeout_seconds',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]
    params = {'namespace': namespace}
    for key, val in six.iteritems(kwargs):
        if key not in recognized:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_collection_namespaced_machine_health_check" % key
            )
        params[key] = val
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and params.get('namespace') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_machine_health_check`")  # noqa: E501
    path_params = {'namespace': params['namespace']}
    # Map python argument names onto their wire-format query keys, keeping
    # the original emission order; unset (None/absent) values are skipped.
    wire_names = [
        ('pretty', 'pretty'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('timeout_seconds', 'timeoutSeconds'),
    ]
    query_params = [(wire, params[py])
                    for py, wire in wire_names
                    if params.get(py) is not None]
    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),  # noqa: E501
    }
    # Authentication setting: BearerToken only.
    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinehealthchecks', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1StatusV2',  # noqa: E501
        auth_settings=['BearerToken'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def delete_collection_namespaced_machine_pool(self, namespace, **kwargs):  # noqa: E501
    """Delete a collection of MachinePool objects in a namespace.

    Synchronous by default; pass async_req=True for an asynchronous request:
    >>> thread = api.delete_collection_namespaced_machine_pool(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: opaque continuation token from a previous list call, used to fetch the next page of results; the server may reject a token it no longer recognizes (410 ResourceExpired).
    :param str field_selector: restrict the returned objects by their fields. Defaults to everything.
    :param str label_selector: restrict the returned objects by their labels. Defaults to everything.
    :param int limit: maximum number of responses to return for a list call; when more items exist the server sets `continue` on the list metadata.
    :param str resource_version: constrains which resource versions the request may be served from. Defaults to unset.
    :param str resource_version_match: determines how resourceVersion is applied to list calls. Defaults to unset.
    :param int timeout_seconds: timeout for the list/watch call, regardless of activity or inactivity.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding. Default is True.
    :param _request_timeout: total request timeout (number) or a (connection, read) timeout tuple.
    :return: V1StatusV2
             If the method is called asynchronously, returns the request thread.
    """
    # Ask the *_with_http_info variant for the deserialized body only,
    # not the (data, status, headers) tuple.
    forwarded = dict(kwargs, _return_http_data_only=True)
    return self.delete_collection_namespaced_machine_pool_with_http_info(namespace, **forwarded)  # noqa: E501
def delete_collection_namespaced_machine_pool_with_http_info(self, namespace, **kwargs):  # noqa: E501
    """delete_collection_namespaced_machine_pool  # noqa: E501

    Delete the collection of MachinePool objects in a namespace. Makes a
    synchronous HTTP request by default; pass async_req=True for an
    asynchronous request.  # noqa: E501

    >>> thread = api.delete_collection_namespaced_machine_pool_with_http_info(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: continue token from a previous paginated list call; server-defined and only valid with identical query parameters.
    :param str field_selector: selector restricting the returned objects by their fields. Defaults to everything.
    :param str label_selector: selector restricting the returned objects by their labels. Defaults to everything.
    :param int limit: maximum number of responses to return for a list call; the server sets `continue` on the list metadata when more items exist.
    :param str resource_version: resourceVersion constraint on which resource versions may serve the request. Defaults to unset.
    :param str resource_version_match: how resourceVersion is applied to list calls. Defaults to unset.
    :param int timeout_seconds: timeout for the list/watch call, regardless of activity.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1StatusV2, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    local_var_params = locals()

    # Declared operation parameters plus the generic client options.
    all_params = [
        'namespace',
        'pretty',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'resource_version',
        'resource_version_match',
        'timeout_seconds',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    # Fold **kwargs into the parameter map, rejecting anything unknown.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_collection_namespaced_machine_pool" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # 'namespace' is required; enforce it when client-side validation is on.
    if self.api_client.client_side_validation and local_var_params.get('namespace') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_machine_pool`")  # noqa: E501

    collection_formats = {}

    path_params = {'namespace': local_var_params['namespace']}

    # Translate snake_case keyword arguments to their camelCase query names.
    query_params = []
    for attr_name, query_name in (
        ('pretty', 'pretty'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('timeout_seconds', 'timeoutSeconds'),
    ):
        value = local_var_params.get(attr_name)
        if value is not None:
            query_params.append((query_name, value))

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),
    }

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinepools', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1StatusV2',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_collection_namespaced_machine_set(self, namespace, **kwargs):  # noqa: E501
    """delete_collection_namespaced_machine_set  # noqa: E501

    Delete the collection of MachineSet objects in a namespace. Makes a
    synchronous HTTP request by default; pass async_req=True for an
    asynchronous request.  # noqa: E501

    >>> thread = api.delete_collection_namespaced_machine_set(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: continue token from a previous paginated list call; server-defined and only valid with identical query parameters.
    :param str field_selector: selector restricting the returned objects by their fields. Defaults to everything.
    :param str label_selector: selector restricting the returned objects by their labels. Defaults to everything.
    :param int limit: maximum number of responses to return for a list call; the server sets `continue` on the list metadata when more items exist.
    :param str resource_version: resourceVersion constraint on which resource versions may serve the request. Defaults to unset.
    :param str resource_version_match: how resourceVersion is applied to list calls. Defaults to unset.
    :param int timeout_seconds: timeout for the list/watch call, regardless of activity.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: V1StatusV2
             If the method is called asynchronously,
             returns the request thread.
    """
    # Convenience wrapper: request only the deserialized body, delegating
    # the actual HTTP work to the *_with_http_info variant.
    kwargs['_return_http_data_only'] = True
    return self.delete_collection_namespaced_machine_set_with_http_info(
        namespace, **kwargs)  # noqa: E501
def delete_collection_namespaced_machine_set_with_http_info(self, namespace, **kwargs):  # noqa: E501
    """delete_collection_namespaced_machine_set  # noqa: E501

    Delete the collection of MachineSet objects in a namespace. Makes a
    synchronous HTTP request by default; pass async_req=True for an
    asynchronous request.  # noqa: E501

    >>> thread = api.delete_collection_namespaced_machine_set_with_http_info(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: continue token from a previous paginated list call; server-defined and only valid with identical query parameters.
    :param str field_selector: selector restricting the returned objects by their fields. Defaults to everything.
    :param str label_selector: selector restricting the returned objects by their labels. Defaults to everything.
    :param int limit: maximum number of responses to return for a list call; the server sets `continue` on the list metadata when more items exist.
    :param str resource_version: resourceVersion constraint on which resource versions may serve the request. Defaults to unset.
    :param str resource_version_match: how resourceVersion is applied to list calls. Defaults to unset.
    :param int timeout_seconds: timeout for the list/watch call, regardless of activity.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1StatusV2, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    local_var_params = locals()

    # Declared operation parameters plus the generic client options.
    all_params = [
        'namespace',
        'pretty',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'resource_version',
        'resource_version_match',
        'timeout_seconds',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    # Fold **kwargs into the parameter map, rejecting anything unknown.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_collection_namespaced_machine_set" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # 'namespace' is required; enforce it when client-side validation is on.
    if self.api_client.client_side_validation and local_var_params.get('namespace') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_machine_set`")  # noqa: E501

    collection_formats = {}

    path_params = {'namespace': local_var_params['namespace']}

    # Translate snake_case keyword arguments to their camelCase query names.
    query_params = []
    for attr_name, query_name in (
        ('pretty', 'pretty'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('timeout_seconds', 'timeoutSeconds'),
    ):
        value = local_var_params.get(attr_name)
        if value is not None:
            query_params.append((query_name, value))

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),
    }

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinesets', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1StatusV2',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_namespaced_cluster(self, name, namespace, **kwargs):  # noqa: E501
    """delete_namespaced_cluster  # noqa: E501

    Delete a single Cluster object. Makes a synchronous HTTP request by
    default; pass async_req=True for an asynchronous request.  # noqa: E501

    >>> thread = api.delete_namespaced_cluster(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Cluster (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: when present, modifications are not persisted; the only valid value is 'All'.
    :param int grace_period_seconds: seconds before the object should be deleted; must be non-negative, zero means delete immediately.
    :param bool orphan_dependents: deprecated in favour of propagation_policy; whether dependent objects should be orphaned.
    :param str propagation_policy: garbage-collection policy: 'Orphan', 'Background', or 'Foreground'. Mutually exclusive with orphan_dependents.
    :param V1DeleteOptionsV2 body:
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: V1StatusV2
             If the method is called asynchronously,
             returns the request thread.
    """
    # Convenience wrapper: request only the deserialized body, delegating
    # the actual HTTP work to the *_with_http_info variant.
    kwargs['_return_http_data_only'] = True
    return self.delete_namespaced_cluster_with_http_info(
        name, namespace, **kwargs)  # noqa: E501
def delete_namespaced_cluster_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """delete_namespaced_cluster  # noqa: E501

    Delete a single Cluster object. Makes a synchronous HTTP request by
    default; pass async_req=True for an asynchronous request.  # noqa: E501

    >>> thread = api.delete_namespaced_cluster_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Cluster (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: when present, modifications are not persisted; the only valid value is 'All'.
    :param int grace_period_seconds: seconds before the object should be deleted; must be non-negative, zero means delete immediately.
    :param bool orphan_dependents: deprecated in favour of propagation_policy; whether dependent objects should be orphaned.
    :param str propagation_policy: garbage-collection policy: 'Orphan', 'Background', or 'Foreground'. Mutually exclusive with orphan_dependents.
    :param V1DeleteOptionsV2 body:
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1StatusV2, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    local_var_params = locals()

    # Declared operation parameters plus the generic client options.
    all_params = [
        'name',
        'namespace',
        'pretty',
        'dry_run',
        'grace_period_seconds',
        'orphan_dependents',
        'propagation_policy',
        'body',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    # Fold **kwargs into the parameter map, rejecting anything unknown.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_namespaced_cluster" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # 'name' and 'namespace' are required; enforce when validation is on.
    if self.api_client.client_side_validation and local_var_params.get('name') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_cluster`")  # noqa: E501
    if self.api_client.client_side_validation and local_var_params.get('namespace') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_cluster`")  # noqa: E501

    collection_formats = {}

    path_params = {
        'name': local_var_params['name'],
        'namespace': local_var_params['namespace'],
    }

    # Translate snake_case keyword arguments to their camelCase query names.
    query_params = []
    for attr_name, query_name in (
        ('pretty', 'pretty'),
        ('dry_run', 'dryRun'),
        ('grace_period_seconds', 'gracePeriodSeconds'),
        ('orphan_dependents', 'orphanDependents'),
        ('propagation_policy', 'propagationPolicy'),
    ):
        value = local_var_params.get(attr_name)
        if value is not None:
            query_params.append((query_name, value))

    # Optional V1DeleteOptionsV2 request body.
    body_params = local_var_params.get('body')

    # HTTP headers `Accept` and `Content-Type`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json', 'application/yaml']),
    }

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/clusters/{name}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=[],
        files={},
        response_type='V1StatusV2',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_namespaced_cluster_class(self, name, namespace, **kwargs):  # noqa: E501
    """delete_namespaced_cluster_class  # noqa: E501

    Delete a single ClusterClass object. Makes a synchronous HTTP request
    by default; pass async_req=True for an asynchronous request.  # noqa: E501

    >>> thread = api.delete_namespaced_cluster_class(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ClusterClass (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: when present, modifications are not persisted; the only valid value is 'All'.
    :param int grace_period_seconds: seconds before the object should be deleted; must be non-negative, zero means delete immediately.
    :param bool orphan_dependents: deprecated in favour of propagation_policy; whether dependent objects should be orphaned.
    :param str propagation_policy: garbage-collection policy: 'Orphan', 'Background', or 'Foreground'. Mutually exclusive with orphan_dependents.
    :param V1DeleteOptionsV2 body:
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: V1StatusV2
             If the method is called asynchronously,
             returns the request thread.
    """
    # Convenience wrapper: request only the deserialized body, delegating
    # the actual HTTP work to the *_with_http_info variant.
    kwargs['_return_http_data_only'] = True
    return self.delete_namespaced_cluster_class_with_http_info(
        name, namespace, **kwargs)  # noqa: E501
def delete_namespaced_cluster_class_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """delete_namespaced_cluster_class  # noqa: E501

    delete a ClusterClass  # noqa: E501

    Synchronous by default; pass async_req=True to get the request
    thread back instead of the decoded response.

    >>> thread = api.delete_namespaced_cluster_class_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ClusterClass (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, modifications are not persisted; the only valid value is 'All'.
    :param int grace_period_seconds: Non-negative seconds before deletion; zero means delete immediately.
    :param bool orphan_dependents: Deprecated in favour of propagation_policy; add/remove the 'orphan' finalizer.
    :param str propagation_policy: Garbage-collection policy: 'Orphan', 'Background' or 'Foreground'.
    :param V1DeleteOptionsV2 body:
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without decoding (default True)
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: tuple(V1StatusV2, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    """
    # Required positional parameters plus everything passed by keyword.
    local_var_params = {'name': name, 'namespace': namespace}

    all_params = [
        'name', 'namespace', 'pretty', 'dry_run',
        'grace_period_seconds', 'orphan_dependents',
        'propagation_policy', 'body',
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ]

    # Reject unknown keyword arguments before doing any work.
    for key, val in six.iteritems(kwargs):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_namespaced_cluster_class" % key
            )
        local_var_params[key] = val

    # Client-side validation of the required path parameters.
    if self.api_client.client_side_validation and local_var_params.get('name') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_cluster_class`")  # noqa: E501
    if self.api_client.client_side_validation and local_var_params.get('namespace') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_cluster_class`")  # noqa: E501

    collection_formats = {}

    path_params = {
        'name': local_var_params['name'],
        'namespace': local_var_params['namespace'],
    }

    # Map snake_case options onto their camelCase wire names, skipping
    # anything unset or explicitly None.
    query_params = []
    for py_name, wire_name in (
        ('pretty', 'pretty'),
        ('dry_run', 'dryRun'),
        ('grace_period_seconds', 'gracePeriodSeconds'),
        ('orphan_dependents', 'orphanDependents'),
        ('propagation_policy', 'propagationPolicy'),
    ):
        value = local_var_params.get(py_name)
        if value is not None:
            query_params.append((wire_name, value))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json', 'application/yaml']),  # noqa: E501
    }

    form_params = []
    local_var_files = {}
    body_params = local_var_params.get('body')

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/clusterclasses/{name}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1StatusV2',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_namespaced_machine(self, name, namespace, **kwargs):  # noqa: E501
    """delete_namespaced_machine  # noqa: E501

    delete a Machine  # noqa: E501

    Synchronous by default; pass async_req=True to get the request
    thread back instead of the decoded response.

    >>> thread = api.delete_namespaced_machine(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Machine (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, modifications are not persisted; the only valid value is 'All'.
    :param int grace_period_seconds: Non-negative seconds before deletion; zero means delete immediately.
    :param bool orphan_dependents: Deprecated in favour of propagation_policy; add/remove the 'orphan' finalizer.
    :param str propagation_policy: Garbage-collection policy: 'Orphan', 'Background' or 'Foreground'.
    :param V1DeleteOptionsV2 body:
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without decoding (default True)
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: V1StatusV2
        If the method is called asynchronously, returns the request thread.
    """
    # Force data-only return, then delegate to the *_with_http_info variant.
    kwargs.update(_return_http_data_only=True)
    return self.delete_namespaced_machine_with_http_info(name, namespace, **kwargs)  # noqa: E501
def delete_namespaced_machine_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """delete_namespaced_machine  # noqa: E501

    delete a Machine  # noqa: E501

    Synchronous by default; pass async_req=True to get the request
    thread back instead of the decoded response.

    >>> thread = api.delete_namespaced_machine_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Machine (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, modifications are not persisted; the only valid value is 'All'.
    :param int grace_period_seconds: Non-negative seconds before deletion; zero means delete immediately.
    :param bool orphan_dependents: Deprecated in favour of propagation_policy; add/remove the 'orphan' finalizer.
    :param str propagation_policy: Garbage-collection policy: 'Orphan', 'Background' or 'Foreground'.
    :param V1DeleteOptionsV2 body:
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without decoding (default True)
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: tuple(V1StatusV2, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    """
    # Required positional parameters plus everything passed by keyword.
    local_var_params = {'name': name, 'namespace': namespace}

    all_params = [
        'name', 'namespace', 'pretty', 'dry_run',
        'grace_period_seconds', 'orphan_dependents',
        'propagation_policy', 'body',
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ]

    # Reject unknown keyword arguments before doing any work.
    for key, val in six.iteritems(kwargs):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_namespaced_machine" % key
            )
        local_var_params[key] = val

    # Client-side validation of the required path parameters.
    if self.api_client.client_side_validation and local_var_params.get('name') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_machine`")  # noqa: E501
    if self.api_client.client_side_validation and local_var_params.get('namespace') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_machine`")  # noqa: E501

    collection_formats = {}

    path_params = {
        'name': local_var_params['name'],
        'namespace': local_var_params['namespace'],
    }

    # Map snake_case options onto their camelCase wire names, skipping
    # anything unset or explicitly None.
    query_params = []
    for py_name, wire_name in (
        ('pretty', 'pretty'),
        ('dry_run', 'dryRun'),
        ('grace_period_seconds', 'gracePeriodSeconds'),
        ('orphan_dependents', 'orphanDependents'),
        ('propagation_policy', 'propagationPolicy'),
    ):
        value = local_var_params.get(py_name)
        if value is not None:
            query_params.append((wire_name, value))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json', 'application/yaml']),  # noqa: E501
    }

    form_params = []
    local_var_files = {}
    body_params = local_var_params.get('body')

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machines/{name}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1StatusV2',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_namespaced_machine_deployment(self, name, namespace, **kwargs):  # noqa: E501
    """delete_namespaced_machine_deployment  # noqa: E501

    delete a MachineDeployment  # noqa: E501

    Synchronous by default; pass async_req=True to get the request
    thread back instead of the decoded response.

    >>> thread = api.delete_namespaced_machine_deployment(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineDeployment (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, modifications are not persisted; the only valid value is 'All'.
    :param int grace_period_seconds: Non-negative seconds before deletion; zero means delete immediately.
    :param bool orphan_dependents: Deprecated in favour of propagation_policy; add/remove the 'orphan' finalizer.
    :param str propagation_policy: Garbage-collection policy: 'Orphan', 'Background' or 'Foreground'.
    :param V1DeleteOptionsV2 body:
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without decoding (default True)
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: V1StatusV2
        If the method is called asynchronously, returns the request thread.
    """
    # Force data-only return, then delegate to the *_with_http_info variant.
    kwargs.update(_return_http_data_only=True)
    return self.delete_namespaced_machine_deployment_with_http_info(name, namespace, **kwargs)  # noqa: E501
def delete_namespaced_machine_deployment_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """delete_namespaced_machine_deployment  # noqa: E501

    delete a MachineDeployment  # noqa: E501

    Synchronous by default; pass async_req=True to get the request
    thread back instead of the decoded response.

    >>> thread = api.delete_namespaced_machine_deployment_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineDeployment (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, modifications are not persisted; the only valid value is 'All'.
    :param int grace_period_seconds: Non-negative seconds before deletion; zero means delete immediately.
    :param bool orphan_dependents: Deprecated in favour of propagation_policy; add/remove the 'orphan' finalizer.
    :param str propagation_policy: Garbage-collection policy: 'Orphan', 'Background' or 'Foreground'.
    :param V1DeleteOptionsV2 body:
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without decoding (default True)
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: tuple(V1StatusV2, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    """
    # Required positional parameters plus everything passed by keyword.
    local_var_params = {'name': name, 'namespace': namespace}

    all_params = [
        'name', 'namespace', 'pretty', 'dry_run',
        'grace_period_seconds', 'orphan_dependents',
        'propagation_policy', 'body',
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ]

    # Reject unknown keyword arguments before doing any work.
    for key, val in six.iteritems(kwargs):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_namespaced_machine_deployment" % key
            )
        local_var_params[key] = val

    # Client-side validation of the required path parameters.
    if self.api_client.client_side_validation and local_var_params.get('name') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_machine_deployment`")  # noqa: E501
    if self.api_client.client_side_validation and local_var_params.get('namespace') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_machine_deployment`")  # noqa: E501

    collection_formats = {}

    path_params = {
        'name': local_var_params['name'],
        'namespace': local_var_params['namespace'],
    }

    # Map snake_case options onto their camelCase wire names, skipping
    # anything unset or explicitly None.
    query_params = []
    for py_name, wire_name in (
        ('pretty', 'pretty'),
        ('dry_run', 'dryRun'),
        ('grace_period_seconds', 'gracePeriodSeconds'),
        ('orphan_dependents', 'orphanDependents'),
        ('propagation_policy', 'propagationPolicy'),
    ):
        value = local_var_params.get(py_name)
        if value is not None:
            query_params.append((wire_name, value))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json', 'application/yaml']),  # noqa: E501
    }

    form_params = []
    local_var_files = {}
    body_params = local_var_params.get('body')

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinedeployments/{name}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1StatusV2',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_namespaced_machine_health_check(self, name, namespace, **kwargs):  # noqa: E501
    """delete_namespaced_machine_health_check  # noqa: E501

    delete a MachineHealthCheck  # noqa: E501

    Synchronous by default; pass async_req=True to get the request
    thread back instead of the decoded response.

    >>> thread = api.delete_namespaced_machine_health_check(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineHealthCheck (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, modifications are not persisted; the only valid value is 'All'.
    :param int grace_period_seconds: Non-negative seconds before deletion; zero means delete immediately.
    :param bool orphan_dependents: Deprecated in favour of propagation_policy; add/remove the 'orphan' finalizer.
    :param str propagation_policy: Garbage-collection policy: 'Orphan', 'Background' or 'Foreground'.
    :param V1DeleteOptionsV2 body:
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without decoding (default True)
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: V1StatusV2
        If the method is called asynchronously, returns the request thread.
    """
    # Force data-only return, then delegate to the *_with_http_info variant.
    kwargs.update(_return_http_data_only=True)
    return self.delete_namespaced_machine_health_check_with_http_info(name, namespace, **kwargs)  # noqa: E501
def delete_namespaced_machine_health_check_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """delete_namespaced_machine_health_check  # noqa: E501

    delete a MachineHealthCheck  # noqa: E501

    Synchronous by default; pass async_req=True to get the request
    thread back instead of the decoded response.

    >>> thread = api.delete_namespaced_machine_health_check_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineHealthCheck (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, modifications are not persisted; the only valid value is 'All'.
    :param int grace_period_seconds: Non-negative seconds before deletion; zero means delete immediately.
    :param bool orphan_dependents: Deprecated in favour of propagation_policy; add/remove the 'orphan' finalizer.
    :param str propagation_policy: Garbage-collection policy: 'Orphan', 'Background' or 'Foreground'.
    :param V1DeleteOptionsV2 body:
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without decoding (default True)
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: tuple(V1StatusV2, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request thread.
    """
    # Required positional parameters plus everything passed by keyword.
    local_var_params = {'name': name, 'namespace': namespace}

    all_params = [
        'name', 'namespace', 'pretty', 'dry_run',
        'grace_period_seconds', 'orphan_dependents',
        'propagation_policy', 'body',
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ]

    # Reject unknown keyword arguments before doing any work.
    for key, val in six.iteritems(kwargs):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_namespaced_machine_health_check" % key
            )
        local_var_params[key] = val

    # Client-side validation of the required path parameters.
    if self.api_client.client_side_validation and local_var_params.get('name') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_machine_health_check`")  # noqa: E501
    if self.api_client.client_side_validation and local_var_params.get('namespace') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_machine_health_check`")  # noqa: E501

    collection_formats = {}

    path_params = {
        'name': local_var_params['name'],
        'namespace': local_var_params['namespace'],
    }

    # Map snake_case options onto their camelCase wire names, skipping
    # anything unset or explicitly None.
    query_params = []
    for py_name, wire_name in (
        ('pretty', 'pretty'),
        ('dry_run', 'dryRun'),
        ('grace_period_seconds', 'gracePeriodSeconds'),
        ('orphan_dependents', 'orphanDependents'),
        ('propagation_policy', 'propagationPolicy'),
    ):
        value = local_var_params.get(py_name)
        if value is not None:
            query_params.append((wire_name, value))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json', 'application/yaml']),  # noqa: E501
    }

    form_params = []
    local_var_files = {}
    body_params = local_var_params.get('body')

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinehealthchecks/{name}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1StatusV2',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_namespaced_machine_pool(self, name, namespace, **kwargs):  # noqa: E501
    """delete_namespaced_machine_pool  # noqa: E501

    delete a MachinePool.

    Synchronous by default; pass async_req=True to get the request
    thread back instead and fetch the result with thread.get().

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachinePool (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: if 'true', pretty-print the output
    :param str dry_run: dry-run directive; 'All' means all dry run stages are processed
    :param int grace_period_seconds: non-negative seconds before deletion; 0 deletes immediately
    :param bool orphan_dependents: deprecated in favour of propagation_policy; mutually exclusive with it
    :param str propagation_policy: garbage-collection policy: 'Orphan', 'Background' or 'Foreground'
    :param V1DeleteOptionsV2 body: delete options
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding it. Default is True.
    :param _request_timeout: total request timeout, or a
                             (connection, read) timeout pair.
    :return: V1StatusV2, or the request thread when called asynchronously
    """
    # This convenience variant returns only the deserialized body;
    # status code and headers are dropped by the *_with_http_info call.
    kwargs['_return_http_data_only'] = True
    return self.delete_namespaced_machine_pool_with_http_info(name, namespace, **kwargs)  # noqa: E501
def delete_namespaced_machine_pool_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """delete_namespaced_machine_pool  # noqa: E501

    delete a MachinePool.

    Synchronous by default; pass async_req=True to get the request
    thread back instead and fetch the result with thread.get().

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachinePool (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: if 'true', pretty-print the output
    :param str dry_run: dry-run directive; 'All' means all dry run stages are processed
    :param int grace_period_seconds: non-negative seconds before deletion; 0 deletes immediately
    :param bool orphan_dependents: deprecated in favour of propagation_policy; mutually exclusive with it
    :param str propagation_policy: garbage-collection policy: 'Orphan', 'Background' or 'Foreground'
    :param V1DeleteOptionsV2 body: delete options
    :param _return_http_data_only: return the response body without the
                                   status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding it. Default is True.
    :param _request_timeout: total request timeout, or a
                             (connection, read) timeout pair.
    :return: tuple(V1StatusV2, status_code(int), headers(HTTPHeaderDict)),
             or the request thread when called asynchronously
    """
    # Every argument this endpoint accepts, positionally or via **kwargs.
    accepted = [
        'name',
        'namespace',
        'pretty',
        'dry_run',
        'grace_period_seconds',
        'orphan_dependents',
        'propagation_policy',
        'body',
        # client-level options
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]
    params = {'name': name, 'namespace': namespace}
    for key, val in six.iteritems(kwargs):
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_namespaced_machine_pool" % key
            )
        params[key] = val

    if self.api_client.client_side_validation:
        # Both path parameters are mandatory and must not be None.
        if params.get('name') is None:
            raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_machine_pool`")  # noqa: E501
        if params.get('namespace') is None:
            raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_machine_pool`")  # noqa: E501

    path_params = {
        'name': params['name'],
        'namespace': params['namespace'],
    }

    # Translate snake_case option names to their camelCase wire keys,
    # skipping anything the caller left unset.
    query_params = [
        (wire_key, params[py_key])
        for py_key, wire_key in (
            ('pretty', 'pretty'),
            ('dry_run', 'dryRun'),
            ('grace_period_seconds', 'gracePeriodSeconds'),
            ('orphan_dependents', 'orphanDependents'),
            ('propagation_policy', 'propagationPolicy'),
        )
        if params.get(py_key) is not None
    ]

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),  # noqa: E501
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json', 'application/yaml']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinepools/{name}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=params.get('body'),
        post_params=[],
        files={},
        response_type='V1StatusV2',  # noqa: E501
        auth_settings=['BearerToken'],  # Authentication setting
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def delete_namespaced_machine_set(self, name, namespace, **kwargs):  # noqa: E501
    """delete_namespaced_machine_set  # noqa: E501

    delete a MachineSet.

    Synchronous by default; pass async_req=True to get the request
    thread back instead and fetch the result with thread.get().

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: if 'true', pretty-print the output
    :param str dry_run: dry-run directive; 'All' means all dry run stages are processed
    :param int grace_period_seconds: non-negative seconds before deletion; 0 deletes immediately
    :param bool orphan_dependents: deprecated in favour of propagation_policy; mutually exclusive with it
    :param str propagation_policy: garbage-collection policy: 'Orphan', 'Background' or 'Foreground'
    :param V1DeleteOptionsV2 body: delete options
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding it. Default is True.
    :param _request_timeout: total request timeout, or a
                             (connection, read) timeout pair.
    :return: V1StatusV2, or the request thread when called asynchronously
    """
    # This convenience variant returns only the deserialized body;
    # status code and headers are dropped by the *_with_http_info call.
    kwargs['_return_http_data_only'] = True
    return self.delete_namespaced_machine_set_with_http_info(name, namespace, **kwargs)  # noqa: E501
def delete_namespaced_machine_set_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """delete_namespaced_machine_set  # noqa: E501

    delete a MachineSet.

    Synchronous by default; pass async_req=True to get the request
    thread back instead and fetch the result with thread.get().

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: if 'true', pretty-print the output
    :param str dry_run: dry-run directive; 'All' means all dry run stages are processed
    :param int grace_period_seconds: non-negative seconds before deletion; 0 deletes immediately
    :param bool orphan_dependents: deprecated in favour of propagation_policy; mutually exclusive with it
    :param str propagation_policy: garbage-collection policy: 'Orphan', 'Background' or 'Foreground'
    :param V1DeleteOptionsV2 body: delete options
    :param _return_http_data_only: return the response body without the
                                   status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding it. Default is True.
    :param _request_timeout: total request timeout, or a
                             (connection, read) timeout pair.
    :return: tuple(V1StatusV2, status_code(int), headers(HTTPHeaderDict)),
             or the request thread when called asynchronously
    """
    # Every argument this endpoint accepts, positionally or via **kwargs.
    accepted = [
        'name',
        'namespace',
        'pretty',
        'dry_run',
        'grace_period_seconds',
        'orphan_dependents',
        'propagation_policy',
        'body',
        # client-level options
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]
    params = {'name': name, 'namespace': namespace}
    for key, val in six.iteritems(kwargs):
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_namespaced_machine_set" % key
            )
        params[key] = val

    if self.api_client.client_side_validation:
        # Both path parameters are mandatory and must not be None.
        if params.get('name') is None:
            raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_machine_set`")  # noqa: E501
        if params.get('namespace') is None:
            raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_machine_set`")  # noqa: E501

    path_params = {
        'name': params['name'],
        'namespace': params['namespace'],
    }

    # Translate snake_case option names to their camelCase wire keys,
    # skipping anything the caller left unset.
    query_params = [
        (wire_key, params[py_key])
        for py_key, wire_key in (
            ('pretty', 'pretty'),
            ('dry_run', 'dryRun'),
            ('grace_period_seconds', 'gracePeriodSeconds'),
            ('orphan_dependents', 'orphanDependents'),
            ('propagation_policy', 'propagationPolicy'),
        )
        if params.get(py_key) is not None
    ]

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),  # noqa: E501
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json', 'application/yaml']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinesets/{name}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=params.get('body'),
        post_params=[],
        files={},
        response_type='V1StatusV2',  # noqa: E501
        auth_settings=['BearerToken'],  # Authentication setting
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def list_cluster_class_for_all_namespaces(self, **kwargs):  # noqa: E501
    """list_cluster_class_for_all_namespaces  # noqa: E501

    list objects of kind ClusterClass across every namespace.

    Synchronous by default; pass async_req=True to get the request
    thread back instead and fetch the result with thread.get().

    :param async_req bool: execute request asynchronously
    :param bool allow_watch_bookmarks: request BOOKMARK watch events; servers may ignore this
    :param str _continue: continue token from a previous paginated list call
    :param str field_selector: restrict results by object fields; defaults to everything
    :param str label_selector: restrict results by object labels; defaults to everything
    :param int limit: maximum number of responses for a list call; pagination continues via `continue`
    :param str pretty: if 'true', pretty-print the output
    :param str resource_version: constrain which resource versions may serve the request
    :param str resource_version_match: how resource_version is applied to list calls
    :param int timeout_seconds: timeout for the list/watch call regardless of activity
    :param bool watch: stream add/update/remove notifications instead of listing
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding it. Default is True.
    :param _request_timeout: total request timeout, or a
                             (connection, read) timeout pair.
    :return: IoXK8sClusterV1alpha4ClusterClassList, or the request thread
             when called asynchronously
    """
    # This convenience variant returns only the deserialized body;
    # status code and headers are dropped by the *_with_http_info call.
    kwargs['_return_http_data_only'] = True
    return self.list_cluster_class_for_all_namespaces_with_http_info(**kwargs)  # noqa: E501
def list_cluster_class_for_all_namespaces_with_http_info(self, **kwargs):  # noqa: E501
    """list_cluster_class_for_all_namespaces  # noqa: E501

    list objects of kind ClusterClass across every namespace.

    Synchronous by default; pass async_req=True to get the request
    thread back instead and fetch the result with thread.get().

    :param async_req bool: execute request asynchronously
    :param bool allow_watch_bookmarks: request BOOKMARK watch events; servers may ignore this
    :param str _continue: continue token from a previous paginated list call
    :param str field_selector: restrict results by object fields; defaults to everything
    :param str label_selector: restrict results by object labels; defaults to everything
    :param int limit: maximum number of responses for a list call; pagination continues via `continue`
    :param str pretty: if 'true', pretty-print the output
    :param str resource_version: constrain which resource versions may serve the request
    :param str resource_version_match: how resource_version is applied to list calls
    :param int timeout_seconds: timeout for the list/watch call regardless of activity
    :param bool watch: stream add/update/remove notifications instead of listing
    :param _return_http_data_only: return the response body without the
                                   status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding it. Default is True.
    :param _request_timeout: total request timeout, or a
                             (connection, read) timeout pair.
    :return: tuple(IoXK8sClusterV1alpha4ClusterClassList, status_code(int),
             headers(HTTPHeaderDict)), or the request thread when called
             asynchronously
    """
    # Every argument this endpoint accepts via **kwargs.
    accepted = [
        'allow_watch_bookmarks',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'pretty',
        'resource_version',
        'resource_version_match',
        'timeout_seconds',
        'watch',
        # client-level options
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]
    params = {}
    for key, val in six.iteritems(kwargs):
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_cluster_class_for_all_namespaces" % key
            )
        params[key] = val

    # Translate snake_case option names to their camelCase wire keys,
    # skipping anything the caller left unset.  '_continue' is prefixed
    # only to avoid shadowing the Python keyword; its wire key is 'continue'.
    query_params = [
        (wire_key, params[py_key])
        for py_key, wire_key in (
            ('allow_watch_bookmarks', 'allowWatchBookmarks'),
            ('_continue', 'continue'),
            ('field_selector', 'fieldSelector'),
            ('label_selector', 'labelSelector'),
            ('limit', 'limit'),
            ('pretty', 'pretty'),
            ('resource_version', 'resourceVersion'),
            ('resource_version_match', 'resourceVersionMatch'),
            ('timeout_seconds', 'timeoutSeconds'),
            ('watch', 'watch'),
        )
        if params.get(py_key) is not None
    ]

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/clusterclasses', 'GET',
        {},  # no path parameters for an all-namespaces list
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='IoXK8sClusterV1alpha4ClusterClassList',  # noqa: E501
        auth_settings=['BearerToken'],  # Authentication setting
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def list_cluster_for_all_namespaces(self, **kwargs):  # noqa: E501
    """list_cluster_for_all_namespaces  # noqa: E501

    List objects of kind Cluster across all namespaces.  # noqa: E501
    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request, in which case the returned thread's ``get()`` yields the result.

    >>> thread = api.list_cluster_for_all_namespaces(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param bool allow_watch_bookmarks: request BOOKMARK watch events; servers
        may ignore this flag and send bookmarks at their discretion. Ignored
        when not watching or when the WatchBookmarks feature gate is off.
    :param str _continue: continuation token from a previous chunked list
        response; must be used with otherwise-identical query parameters.
        On expiry the server answers 410 ResourceExpired with a fresh token.
        Not supported when watch is true.
    :param str field_selector: restrict results by object fields
        (default: everything).
    :param str label_selector: restrict results by object labels
        (default: everything).
    :param int limit: maximum number of items per list call; when more items
        exist the response metadata carries a `continue` token for the next
        chunk. Not supported when watch is true.
    :param str pretty: if 'true', pretty-print the output.
    :param str resource_version: constrain which resource versions the
        request may be served from; see
        https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions
    :param str resource_version_match: how resourceVersion is applied to list
        calls; see the same api-concepts document. Defaults to unset.
    :param int timeout_seconds: cap on the duration of the list/watch call.
    :param bool watch: stream add/update/remove notifications instead of a
        one-shot list; specify resourceVersion.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        without reading/decoding. Default True.
    :param _request_timeout: total timeout, or a (connect, read) tuple.
    :return: IoXK8sClusterV1alpha4ClusterList
        If the method is called asynchronously,
        returns the request thread.
    """
    # Force body-only return; dict() merge overrides any caller value,
    # matching the original in-place kwargs mutation.
    return self.list_cluster_for_all_namespaces_with_http_info(
        **dict(kwargs, _return_http_data_only=True))  # noqa: E501
def list_cluster_for_all_namespaces_with_http_info(self, **kwargs):  # noqa: E501
    """list_cluster_for_all_namespaces  # noqa: E501

    List objects of kind Cluster across all namespaces, returning the
    deserialized body together with HTTP status and headers.  # noqa: E501
    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request, in which case the returned thread's ``get()`` yields the result.

    >>> thread = api.list_cluster_for_all_namespaces_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param bool allow_watch_bookmarks: request BOOKMARK watch events; servers
        may ignore this flag. Ignored when not watching.
    :param str _continue: continuation token from a previous chunked list
        response. Not supported when watch is true.
    :param str field_selector: restrict results by object fields.
    :param str label_selector: restrict results by object labels.
    :param int limit: maximum number of items per list call; chunking token
        is returned in the list metadata `continue` field.
    :param str pretty: if 'true', pretty-print the output.
    :param str resource_version: constrain which resource versions the
        request may be served from; see
        https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions
    :param str resource_version_match: how resourceVersion is applied to
        list calls; see the same api-concepts document. Defaults to unset.
    :param int timeout_seconds: cap on the duration of the list/watch call.
    :param bool watch: stream change notifications instead of a one-shot
        list; specify resourceVersion.
    :param _return_http_data_only: response data without status code/headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        without reading/decoding. Default True.
    :param _request_timeout: total timeout, or a (connect, read) tuple.
    :return: tuple(IoXK8sClusterV1alpha4ClusterList, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    """
    # Python keyword name -> wire query key, in emission order.
    query_name_map = [
        ('allow_watch_bookmarks', 'allowWatchBookmarks'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('pretty', 'pretty'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    ]
    all_params = [py_name for py_name, _ in query_name_map]
    all_params.extend([
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ])

    # Validate and collect caller-supplied keyword arguments.
    local_var_params = {}
    for key, val in six.iteritems(kwargs):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_cluster_for_all_namespaces" % key
            )
        local_var_params[key] = val

    # Only parameters that were supplied and are not None become query pairs.
    query_params = [
        (wire_name, local_var_params[py_name])
        for py_name, wire_name in query_name_map
        if local_var_params.get(py_name) is not None
    ]

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/clusters', 'GET',
        {},  # path_params: collection endpoint has no path variables
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='IoXK8sClusterV1alpha4ClusterList',  # noqa: E501
        auth_settings=['BearerToken'],  # Authentication setting
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={})
def list_machine_deployment_for_all_namespaces(self, **kwargs):  # noqa: E501
    """list_machine_deployment_for_all_namespaces  # noqa: E501

    List objects of kind MachineDeployment across all namespaces.  # noqa: E501
    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request, in which case the returned thread's ``get()`` yields the result.

    >>> thread = api.list_machine_deployment_for_all_namespaces(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param bool allow_watch_bookmarks: request BOOKMARK watch events; servers
        may ignore this flag and send bookmarks at their discretion. Ignored
        when not watching or when the WatchBookmarks feature gate is off.
    :param str _continue: continuation token from a previous chunked list
        response; must be used with otherwise-identical query parameters.
        On expiry the server answers 410 ResourceExpired with a fresh token.
        Not supported when watch is true.
    :param str field_selector: restrict results by object fields
        (default: everything).
    :param str label_selector: restrict results by object labels
        (default: everything).
    :param int limit: maximum number of items per list call; when more items
        exist the response metadata carries a `continue` token for the next
        chunk. Not supported when watch is true.
    :param str pretty: if 'true', pretty-print the output.
    :param str resource_version: constrain which resource versions the
        request may be served from; see
        https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions
    :param str resource_version_match: how resourceVersion is applied to list
        calls; see the same api-concepts document. Defaults to unset.
    :param int timeout_seconds: cap on the duration of the list/watch call.
    :param bool watch: stream add/update/remove notifications instead of a
        one-shot list; specify resourceVersion.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        without reading/decoding. Default True.
    :param _request_timeout: total timeout, or a (connect, read) tuple.
    :return: IoXK8sClusterV1alpha4MachineDeploymentList
        If the method is called asynchronously,
        returns the request thread.
    """
    # Force body-only return; dict() merge overrides any caller value,
    # matching the original in-place kwargs mutation.
    return self.list_machine_deployment_for_all_namespaces_with_http_info(
        **dict(kwargs, _return_http_data_only=True))  # noqa: E501
def list_machine_deployment_for_all_namespaces_with_http_info(self, **kwargs):  # noqa: E501
    """list_machine_deployment_for_all_namespaces  # noqa: E501

    List objects of kind MachineDeployment across all namespaces, returning
    the deserialized body together with HTTP status and headers.  # noqa: E501
    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request, in which case the returned thread's ``get()`` yields the result.

    >>> thread = api.list_machine_deployment_for_all_namespaces_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param bool allow_watch_bookmarks: request BOOKMARK watch events; servers
        may ignore this flag. Ignored when not watching.
    :param str _continue: continuation token from a previous chunked list
        response. Not supported when watch is true.
    :param str field_selector: restrict results by object fields.
    :param str label_selector: restrict results by object labels.
    :param int limit: maximum number of items per list call; chunking token
        is returned in the list metadata `continue` field.
    :param str pretty: if 'true', pretty-print the output.
    :param str resource_version: constrain which resource versions the
        request may be served from; see
        https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions
    :param str resource_version_match: how resourceVersion is applied to
        list calls; see the same api-concepts document. Defaults to unset.
    :param int timeout_seconds: cap on the duration of the list/watch call.
    :param bool watch: stream change notifications instead of a one-shot
        list; specify resourceVersion.
    :param _return_http_data_only: response data without status code/headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        without reading/decoding. Default True.
    :param _request_timeout: total timeout, or a (connect, read) tuple.
    :return: tuple(IoXK8sClusterV1alpha4MachineDeploymentList, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    """
    # Python keyword name -> wire query key, in emission order.
    query_name_map = [
        ('allow_watch_bookmarks', 'allowWatchBookmarks'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('pretty', 'pretty'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    ]
    all_params = [py_name for py_name, _ in query_name_map]
    all_params.extend([
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ])

    # Validate and collect caller-supplied keyword arguments.
    local_var_params = {}
    for key, val in six.iteritems(kwargs):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_machine_deployment_for_all_namespaces" % key
            )
        local_var_params[key] = val

    # Only parameters that were supplied and are not None become query pairs.
    query_params = [
        (wire_name, local_var_params[py_name])
        for py_name, wire_name in query_name_map
        if local_var_params.get(py_name) is not None
    ]

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/machinedeployments', 'GET',
        {},  # path_params: collection endpoint has no path variables
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='IoXK8sClusterV1alpha4MachineDeploymentList',  # noqa: E501
        auth_settings=['BearerToken'],  # Authentication setting
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={})
def list_machine_for_all_namespaces(self, **kwargs):  # noqa: E501
    """List objects of kind Machine across every namespace.  # noqa: E501

    The call is synchronous by default; pass ``async_req=True`` to get a
    request thread back instead (``thread.get()`` yields the result).

    >>> thread = api.list_machine_for_all_namespaces(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param bool allow_watch_bookmarks: request BOOKMARK watch events; servers
        may ignore this flag, and it is ignored when not watching.
    :param str _continue: continuation token from a previous chunked list
        result; the server rejects tokens it no longer recognizes (410).
    :param str field_selector: restrict results by object fields
        (default: everything).
    :param str label_selector: restrict results by object labels
        (default: everything).
    :param int limit: maximum number of items per list call; the server sets
        ``continue`` on the list metadata when more results exist.
    :param str pretty: if 'true', the output is pretty printed.
    :param str resource_version: constraint on which resource versions the
        request may be served from (default: unset).
    :param str resource_version_match: how resource_version is applied to the
        list call (default: unset).
    :param int timeout_seconds: timeout for the list/watch call, regardless
        of activity.
    :param bool watch: stream add/update/remove notifications instead of a
        one-shot list; specify resource_version.
    :param _preload_content: if False, the urllib3.HTTPResponse object is
        returned without reading/decoding the response data. Default True.
    :param _request_timeout: request timeout; a single number for total
        timeout, or a (connection, read) tuple.
    :return: IoXK8sClusterV1alpha4MachineList, or the request thread when
        called asynchronously.
    """
    # Delegate to the *_with_http_info variant, requesting only the
    # deserialized body (no (data, status, headers) tuple).
    kwargs['_return_http_data_only'] = True
    return self.list_machine_for_all_namespaces_with_http_info(**kwargs)  # noqa: E501
def list_machine_for_all_namespaces_with_http_info(self, **kwargs):  # noqa: E501
    """List objects of kind Machine across every namespace.  # noqa: E501

    Same as :meth:`list_machine_for_all_namespaces`, but the response also
    carries the HTTP status code and headers. Synchronous by default; pass
    ``async_req=True`` for a request thread.

    >>> thread = api.list_machine_for_all_namespaces_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param bool allow_watch_bookmarks: request BOOKMARK watch events; servers
        may ignore this flag, and it is ignored when not watching.
    :param str _continue: continuation token from a previous chunked list
        result; the server rejects tokens it no longer recognizes (410).
    :param str field_selector: restrict results by object fields
        (default: everything).
    :param str label_selector: restrict results by object labels
        (default: everything).
    :param int limit: maximum number of items per list call; the server sets
        ``continue`` on the list metadata when more results exist.
    :param str pretty: if 'true', the output is pretty printed.
    :param str resource_version: constraint on which resource versions the
        request may be served from (default: unset).
    :param str resource_version_match: how resource_version is applied to the
        list call (default: unset).
    :param int timeout_seconds: timeout for the list/watch call, regardless
        of activity.
    :param bool watch: stream add/update/remove notifications instead of a
        one-shot list; specify resource_version.
    :param _return_http_data_only: return response data without the status
        code and headers.
    :param _preload_content: if False, the urllib3.HTTPResponse object is
        returned without reading/decoding the response data. Default True.
    :param _request_timeout: request timeout; a single number for total
        timeout, or a (connection, read) tuple.
    :return: tuple(IoXK8sClusterV1alpha4MachineList, status_code(int),
        headers(HTTPHeaderDict)), or the request thread when called
        asynchronously.
    """
    # Reject any keyword argument the operation does not define.
    accepted = {
        'allow_watch_bookmarks',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'pretty',
        'resource_version',
        'resource_version_match',
        'timeout_seconds',
        'watch',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    }
    for key in kwargs:
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_machine_for_all_namespaces" % key
            )
    local_var_params = kwargs

    collection_formats = {}
    path_params = {}

    # (python_name, wire_name) pairs, in the order the generated client
    # emits them; only parameters that were supplied (and non-None) are
    # forwarded as query parameters.
    query_param_names = [
        ('allow_watch_bookmarks', 'allowWatchBookmarks'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('pretty', 'pretty'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    ]
    query_params = [
        (wire_name, local_var_params[py_name])
        for py_name, wire_name in query_param_names
        if local_var_params.get(py_name) is not None
    ]

    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`: this endpoint produces JSON or YAML.
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),  # noqa: E501
    }

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/machines', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='IoXK8sClusterV1alpha4MachineList',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def list_machine_health_check_for_all_namespaces(self, **kwargs):  # noqa: E501
    """List objects of kind MachineHealthCheck across every namespace.  # noqa: E501

    The call is synchronous by default; pass ``async_req=True`` to get a
    request thread back instead (``thread.get()`` yields the result).

    >>> thread = api.list_machine_health_check_for_all_namespaces(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param bool allow_watch_bookmarks: request BOOKMARK watch events; servers
        may ignore this flag, and it is ignored when not watching.
    :param str _continue: continuation token from a previous chunked list
        result; the server rejects tokens it no longer recognizes (410).
    :param str field_selector: restrict results by object fields
        (default: everything).
    :param str label_selector: restrict results by object labels
        (default: everything).
    :param int limit: maximum number of items per list call; the server sets
        ``continue`` on the list metadata when more results exist.
    :param str pretty: if 'true', the output is pretty printed.
    :param str resource_version: constraint on which resource versions the
        request may be served from (default: unset).
    :param str resource_version_match: how resource_version is applied to the
        list call (default: unset).
    :param int timeout_seconds: timeout for the list/watch call, regardless
        of activity.
    :param bool watch: stream add/update/remove notifications instead of a
        one-shot list; specify resource_version.
    :param _preload_content: if False, the urllib3.HTTPResponse object is
        returned without reading/decoding the response data. Default True.
    :param _request_timeout: request timeout; a single number for total
        timeout, or a (connection, read) tuple.
    :return: IoXK8sClusterV1alpha4MachineHealthCheckList, or the request
        thread when called asynchronously.
    """
    # Delegate to the *_with_http_info variant, requesting only the
    # deserialized body (no (data, status, headers) tuple).
    kwargs['_return_http_data_only'] = True
    return self.list_machine_health_check_for_all_namespaces_with_http_info(**kwargs)  # noqa: E501
def list_machine_health_check_for_all_namespaces_with_http_info(self, **kwargs):  # noqa: E501
    """List objects of kind MachineHealthCheck across every namespace.  # noqa: E501

    Same as :meth:`list_machine_health_check_for_all_namespaces`, but the
    response also carries the HTTP status code and headers. Synchronous by
    default; pass ``async_req=True`` for a request thread.

    >>> thread = api.list_machine_health_check_for_all_namespaces_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param bool allow_watch_bookmarks: request BOOKMARK watch events; servers
        may ignore this flag, and it is ignored when not watching.
    :param str _continue: continuation token from a previous chunked list
        result; the server rejects tokens it no longer recognizes (410).
    :param str field_selector: restrict results by object fields
        (default: everything).
    :param str label_selector: restrict results by object labels
        (default: everything).
    :param int limit: maximum number of items per list call; the server sets
        ``continue`` on the list metadata when more results exist.
    :param str pretty: if 'true', the output is pretty printed.
    :param str resource_version: constraint on which resource versions the
        request may be served from (default: unset).
    :param str resource_version_match: how resource_version is applied to the
        list call (default: unset).
    :param int timeout_seconds: timeout for the list/watch call, regardless
        of activity.
    :param bool watch: stream add/update/remove notifications instead of a
        one-shot list; specify resource_version.
    :param _return_http_data_only: return response data without the status
        code and headers.
    :param _preload_content: if False, the urllib3.HTTPResponse object is
        returned without reading/decoding the response data. Default True.
    :param _request_timeout: request timeout; a single number for total
        timeout, or a (connection, read) tuple.
    :return: tuple(IoXK8sClusterV1alpha4MachineHealthCheckList,
        status_code(int), headers(HTTPHeaderDict)), or the request thread
        when called asynchronously.
    """
    # Reject any keyword argument the operation does not define.
    accepted = {
        'allow_watch_bookmarks',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'pretty',
        'resource_version',
        'resource_version_match',
        'timeout_seconds',
        'watch',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    }
    for key in kwargs:
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_machine_health_check_for_all_namespaces" % key
            )
    local_var_params = kwargs

    collection_formats = {}
    path_params = {}

    # (python_name, wire_name) pairs, in the order the generated client
    # emits them; only parameters that were supplied (and non-None) are
    # forwarded as query parameters.
    query_param_names = [
        ('allow_watch_bookmarks', 'allowWatchBookmarks'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('pretty', 'pretty'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    ]
    query_params = [
        (wire_name, local_var_params[py_name])
        for py_name, wire_name in query_param_names
        if local_var_params.get(py_name) is not None
    ]

    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`: this endpoint produces JSON or YAML.
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),  # noqa: E501
    }

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/machinehealthchecks', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='IoXK8sClusterV1alpha4MachineHealthCheckList',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def list_machine_pool_for_all_namespaces(self, **kwargs):  # noqa: E501
    """List objects of kind MachinePool across every namespace.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.list_machine_pool_for_all_namespaces(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param bool allow_watch_bookmarks: request watch events of type \"BOOKMARK\"; servers that do not implement bookmarks may ignore this flag.
    :param str _continue: continuation token returned by a previous paginated list call with identical query parameters.
    :param str field_selector: restrict the returned objects by their fields. Defaults to everything.
    :param str label_selector: restrict the returned objects by their labels. Defaults to everything.
    :param int limit: maximum number of responses to return for this list call; the server sets `continue` on the list metadata when more items exist.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: constrains which resource versions the request may be served from. Defaults to unset.
    :param str resource_version_match: how resource_version is applied to the list call. Defaults to unset.
    :param int timeout_seconds: timeout for the list/watch call, regardless of activity or inactivity.
    :param bool watch: watch for changes and return them as a stream of add, update, and remove notifications.
    :param _preload_content: if False, the urllib3.HTTPResponse object is
                             returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout for this request: a single number for
                             the total timeout, or a (connection, read)
                             tuple.
    :return: IoXK8sClusterV1alpha4MachinePoolList
             If the method is called asynchronously,
             returns the request thread.
    """
    # Convenience wrapper: callers here want only the deserialized body,
    # not the (data, status, headers) tuple, so force that flag on the
    # full-info variant and delegate everything else untouched.
    return self.list_machine_pool_for_all_namespaces_with_http_info(
        **dict(kwargs, _return_http_data_only=True))  # noqa: E501
def list_machine_pool_for_all_namespaces_with_http_info(self, **kwargs):  # noqa: E501
    """List objects of kind MachinePool across every namespace.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.list_machine_pool_for_all_namespaces_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param bool allow_watch_bookmarks: request watch events of type \"BOOKMARK\"; servers that do not implement bookmarks may ignore this flag.
    :param str _continue: continuation token returned by a previous paginated list call with identical query parameters.
    :param str field_selector: restrict the returned objects by their fields. Defaults to everything.
    :param str label_selector: restrict the returned objects by their labels. Defaults to everything.
    :param int limit: maximum number of responses to return for this list call; the server sets `continue` on the list metadata when more items exist.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: constrains which resource versions the request may be served from. Defaults to unset.
    :param str resource_version_match: how resource_version is applied to the list call. Defaults to unset.
    :param int timeout_seconds: timeout for the list/watch call, regardless of activity or inactivity.
    :param bool watch: watch for changes and return them as a stream of add, update, and remove notifications.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object is
                             returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout for this request: a single number for
                             the total timeout, or a (connection, read)
                             tuple.
    :return: tuple(IoXK8sClusterV1alpha4MachinePoolList, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # python argument name -> wire (query-string) name, in request order.
    query_param_names = [
        ('allow_watch_bookmarks', 'allowWatchBookmarks'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('pretty', 'pretty'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    ]
    # Transport-level options accepted by every generated method.
    request_options = (
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    )
    accepted = frozenset(
        name for name, _ in query_param_names).union(request_options)
    # Reject any keyword the endpoint does not understand, mirroring the
    # generated-client contract of failing fast on typos.
    for key in kwargs:
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_machine_pool_for_all_namespaces" % key
            )
    local_var_params = kwargs

    # Forward only the query parameters the caller actually supplied.
    query_params = [
        (wire_name, local_var_params[py_name])
        for py_name, wire_name in query_param_names
        if local_var_params.get(py_name) is not None
    ]

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/machinepools', 'GET',
        {},  # no path parameters on the all-namespaces endpoint
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='IoXK8sClusterV1alpha4MachinePoolList',  # noqa: E501
        auth_settings=['BearerToken'],  # Authentication setting
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={})
def list_machine_set_for_all_namespaces(self, **kwargs):  # noqa: E501
    """List objects of kind MachineSet across every namespace.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.list_machine_set_for_all_namespaces(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param bool allow_watch_bookmarks: request watch events of type \"BOOKMARK\"; servers that do not implement bookmarks may ignore this flag.
    :param str _continue: continuation token returned by a previous paginated list call with identical query parameters.
    :param str field_selector: restrict the returned objects by their fields. Defaults to everything.
    :param str label_selector: restrict the returned objects by their labels. Defaults to everything.
    :param int limit: maximum number of responses to return for this list call; the server sets `continue` on the list metadata when more items exist.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: constrains which resource versions the request may be served from. Defaults to unset.
    :param str resource_version_match: how resource_version is applied to the list call. Defaults to unset.
    :param int timeout_seconds: timeout for the list/watch call, regardless of activity or inactivity.
    :param bool watch: watch for changes and return them as a stream of add, update, and remove notifications.
    :param _preload_content: if False, the urllib3.HTTPResponse object is
                             returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout for this request: a single number for
                             the total timeout, or a (connection, read)
                             tuple.
    :return: IoXK8sClusterV1alpha4MachineSetList
             If the method is called asynchronously,
             returns the request thread.
    """
    # Convenience wrapper: callers here want only the deserialized body,
    # not the (data, status, headers) tuple, so force that flag on the
    # full-info variant and delegate everything else untouched.
    return self.list_machine_set_for_all_namespaces_with_http_info(
        **dict(kwargs, _return_http_data_only=True))  # noqa: E501
def list_machine_set_for_all_namespaces_with_http_info(self, **kwargs):  # noqa: E501
    """List objects of kind MachineSet across every namespace.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.list_machine_set_for_all_namespaces_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param bool allow_watch_bookmarks: request watch events of type \"BOOKMARK\"; servers that do not implement bookmarks may ignore this flag.
    :param str _continue: continuation token returned by a previous paginated list call with identical query parameters.
    :param str field_selector: restrict the returned objects by their fields. Defaults to everything.
    :param str label_selector: restrict the returned objects by their labels. Defaults to everything.
    :param int limit: maximum number of responses to return for this list call; the server sets `continue` on the list metadata when more items exist.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: constrains which resource versions the request may be served from. Defaults to unset.
    :param str resource_version_match: how resource_version is applied to the list call. Defaults to unset.
    :param int timeout_seconds: timeout for the list/watch call, regardless of activity or inactivity.
    :param bool watch: watch for changes and return them as a stream of add, update, and remove notifications.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object is
                             returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout for this request: a single number for
                             the total timeout, or a (connection, read)
                             tuple.
    :return: tuple(IoXK8sClusterV1alpha4MachineSetList, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # python argument name -> wire (query-string) name, in request order.
    query_param_names = [
        ('allow_watch_bookmarks', 'allowWatchBookmarks'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('pretty', 'pretty'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    ]
    # Transport-level options accepted by every generated method.
    request_options = (
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    )
    accepted = frozenset(
        name for name, _ in query_param_names).union(request_options)
    # Reject any keyword the endpoint does not understand, mirroring the
    # generated-client contract of failing fast on typos.
    for key in kwargs:
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_machine_set_for_all_namespaces" % key
            )
    local_var_params = kwargs

    # Forward only the query parameters the caller actually supplied.
    query_params = [
        (wire_name, local_var_params[py_name])
        for py_name, wire_name in query_param_names
        if local_var_params.get(py_name) is not None
    ]

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml'])  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/machinesets', 'GET',
        {},  # no path parameters on the all-namespaces endpoint
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='IoXK8sClusterV1alpha4MachineSetList',  # noqa: E501
        auth_settings=['BearerToken'],  # Authentication setting
        async_settings=None if False else None,  # noqa  # placeholder removed
        collection_formats={})
def list_namespaced_cluster(self, namespace, **kwargs):  # noqa: E501
    """list_namespaced_cluster  # noqa: E501

    list objects of kind Cluster  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.list_namespaced_cluster(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: if 'true', the output is pretty printed
    :param bool allow_watch_bookmarks: request BOOKMARK watch events (servers may ignore this)
    :param str _continue: continuation token from a previous, otherwise-identical list call
    :param str field_selector: restrict the returned objects by their fields
    :param str label_selector: restrict the returned objects by their labels
    :param int limit: maximum number of items to return in one response
    :param str resource_version: resourceVersion constraint for the list call
    :param str resource_version_match: how resource_version is applied to the list call
    :param int timeout_seconds: server-side timeout for the list/watch call
    :param bool watch: stream change notifications instead of a one-shot list
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: total request timeout, or a (connect, read) tuple
    :return: IoXK8sClusterV1alpha4ClusterList
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, forcing data-only responses
    # so callers get the deserialized list rather than a (data, status,
    # headers) tuple.
    kwargs.update(_return_http_data_only=True)
    return self.list_namespaced_cluster_with_http_info(namespace, **kwargs)  # noqa: E501
def list_namespaced_cluster_with_http_info(self, namespace, **kwargs):  # noqa: E501
    """list_namespaced_cluster  # noqa: E501

    list objects of kind Cluster  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.list_namespaced_cluster_with_http_info(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: if 'true', the output is pretty printed
    :param bool allow_watch_bookmarks: request BOOKMARK watch events (servers may ignore this)
    :param str _continue: continuation token from a previous, otherwise-identical list call
    :param str field_selector: restrict the returned objects by their fields
    :param str label_selector: restrict the returned objects by their labels
    :param int limit: maximum number of items to return in one response
    :param str resource_version: resourceVersion constraint for the list call
    :param str resource_version_match: how resource_version is applied to the list call
    :param int timeout_seconds: server-side timeout for the list/watch call
    :param bool watch: stream change notifications instead of a one-shot list
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: total request timeout, or a (connect, read) tuple
    :return: tuple(IoXK8sClusterV1alpha4ClusterList, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Every keyword this endpoint accepts: the OpenAPI query parameters
    # plus the generic request-control options shared by all methods.
    all_params = [
        'namespace', 'pretty', 'allow_watch_bookmarks', '_continue',
        'field_selector', 'label_selector', 'limit', 'resource_version',
        'resource_version_match', 'timeout_seconds', 'watch',
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    ]

    # Collect the effective parameters, rejecting anything unknown.
    local_var_params = {'self': self, 'namespace': namespace}
    for key, val in six.iteritems(kwargs):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_namespaced_cluster" % key
            )
        local_var_params[key] = val

    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and local_var_params.get('namespace') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_cluster`")  # noqa: E501

    collection_formats = {}

    path_params = {'namespace': local_var_params['namespace']}

    # (python_name, wire_name) pairs, in the order the query string is built.
    _query_names = (
        ('pretty', 'pretty'),
        ('allow_watch_bookmarks', 'allowWatchBookmarks'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    )
    # Only parameters the caller actually supplied (non-None) go on the wire.
    query_params = [
        (wire_name, local_var_params[py_name])
        for py_name, wire_name in _query_names
        if local_var_params.get(py_name) is not None
    ]

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/clusters', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='IoXK8sClusterV1alpha4ClusterList',  # noqa: E501
        auth_settings=['BearerToken'],  # Authentication setting
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def list_namespaced_cluster_class(self, namespace, **kwargs):  # noqa: E501
    """list_namespaced_cluster_class  # noqa: E501

    list objects of kind ClusterClass  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.list_namespaced_cluster_class(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: if 'true', the output is pretty printed
    :param bool allow_watch_bookmarks: request BOOKMARK watch events (servers may ignore this)
    :param str _continue: continuation token from a previous, otherwise-identical list call
    :param str field_selector: restrict the returned objects by their fields
    :param str label_selector: restrict the returned objects by their labels
    :param int limit: maximum number of items to return in one response
    :param str resource_version: resourceVersion constraint for the list call
    :param str resource_version_match: how resource_version is applied to the list call
    :param int timeout_seconds: server-side timeout for the list/watch call
    :param bool watch: stream change notifications instead of a one-shot list
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: total request timeout, or a (connect, read) tuple
    :return: IoXK8sClusterV1alpha4ClusterClassList
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, forcing data-only responses
    # so callers get the deserialized list rather than a (data, status,
    # headers) tuple.
    kwargs.update(_return_http_data_only=True)
    return self.list_namespaced_cluster_class_with_http_info(namespace, **kwargs)  # noqa: E501
def list_namespaced_cluster_class_with_http_info(self, namespace, **kwargs):  # noqa: E501
    """list_namespaced_cluster_class  # noqa: E501

    list objects of kind ClusterClass  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.list_namespaced_cluster_class_with_http_info(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: if 'true', the output is pretty printed
    :param bool allow_watch_bookmarks: request BOOKMARK watch events (servers may ignore this)
    :param str _continue: continuation token from a previous, otherwise-identical list call
    :param str field_selector: restrict the returned objects by their fields
    :param str label_selector: restrict the returned objects by their labels
    :param int limit: maximum number of items to return in one response
    :param str resource_version: resourceVersion constraint for the list call
    :param str resource_version_match: how resource_version is applied to the list call
    :param int timeout_seconds: server-side timeout for the list/watch call
    :param bool watch: stream change notifications instead of a one-shot list
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: total request timeout, or a (connect, read) tuple
    :return: tuple(IoXK8sClusterV1alpha4ClusterClassList, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Every keyword this endpoint accepts: the OpenAPI query parameters
    # plus the generic request-control options shared by all methods.
    all_params = [
        'namespace', 'pretty', 'allow_watch_bookmarks', '_continue',
        'field_selector', 'label_selector', 'limit', 'resource_version',
        'resource_version_match', 'timeout_seconds', 'watch',
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    ]

    # Collect the effective parameters, rejecting anything unknown.
    local_var_params = {'self': self, 'namespace': namespace}
    for key, val in six.iteritems(kwargs):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_namespaced_cluster_class" % key
            )
        local_var_params[key] = val

    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and local_var_params.get('namespace') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_cluster_class`")  # noqa: E501

    collection_formats = {}

    path_params = {'namespace': local_var_params['namespace']}

    # (python_name, wire_name) pairs, in the order the query string is built.
    _query_names = (
        ('pretty', 'pretty'),
        ('allow_watch_bookmarks', 'allowWatchBookmarks'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    )
    # Only parameters the caller actually supplied (non-None) go on the wire.
    query_params = [
        (wire_name, local_var_params[py_name])
        for py_name, wire_name in _query_names
        if local_var_params.get(py_name) is not None
    ]

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/clusterclasses', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='IoXK8sClusterV1alpha4ClusterClassList',  # noqa: E501
        auth_settings=['BearerToken'],  # Authentication setting
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def list_namespaced_machine(self, namespace, **kwargs):  # noqa: E501
    """List objects of kind Machine in the given namespace.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, pass async_req=True and call get() on the
    returned thread:

    >>> thread = api.list_namespaced_machine(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool allow_watch_bookmarks: request BOOKMARK watch events; servers may ignore this flag.
    :param str _continue: continuation token from a previous paginated list result.
    :param str field_selector: restrict the returned objects by their fields. Defaults to everything.
    :param str label_selector: restrict the returned objects by their labels. Defaults to everything.
    :param int limit: maximum number of responses to return for a list call.
    :param str resource_version: resourceVersion constraint for the request. Defaults to unset.
    :param str resource_version_match: how resourceVersion is applied to list calls. Defaults to unset.
    :param int timeout_seconds: timeout for the list/watch call.
    :param bool watch: watch for changes and return them as a stream of notifications.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: IoXK8sClusterV1alpha4MachineList
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, requesting only the
    # deserialized body (no status code / headers tuple).
    kwargs['_return_http_data_only'] = True
    return self.list_namespaced_machine_with_http_info(namespace, **kwargs)  # noqa: E501
def list_namespaced_machine_with_http_info(self, namespace, **kwargs):  # noqa: E501
    """List objects of kind Machine in the given namespace (full response).  # noqa: E501

    Returns the deserialized body together with the HTTP status code and
    response headers. Synchronous by default; pass async_req=True for an
    asynchronous request:

    >>> thread = api.list_namespaced_machine_with_http_info(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool allow_watch_bookmarks: request BOOKMARK watch events; servers may ignore this flag.
    :param str _continue: continuation token from a previous paginated list result.
    :param str field_selector: restrict the returned objects by their fields. Defaults to everything.
    :param str label_selector: restrict the returned objects by their labels. Defaults to everything.
    :param int limit: maximum number of responses to return for a list call.
    :param str resource_version: resourceVersion constraint for the request. Defaults to unset.
    :param str resource_version_match: how resourceVersion is applied to list calls. Defaults to unset.
    :param int timeout_seconds: timeout for the list/watch call.
    :param bool watch: watch for changes and return them as a stream of notifications.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(IoXK8sClusterV1alpha4MachineList, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Capture the call arguments (self, namespace, kwargs) up front.
    local_var_params = locals()

    # Accepted keyword arguments: API query options plus client options.
    all_params = [
        'namespace',
        'pretty',
        'allow_watch_bookmarks',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'resource_version',
        'resource_version_match',
        'timeout_seconds',
        'watch',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    # Reject unknown keywords; flatten the known ones into local_var_params.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_namespaced_machine" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and local_var_params.get('namespace') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_machine`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Emit each supplied, non-None query option under its wire name,
    # preserving the generator's fixed ordering.
    query_params = []
    for py_name, wire_name in (
            ('pretty', 'pretty'),
            ('allow_watch_bookmarks', 'allowWatchBookmarks'),
            ('_continue', 'continue'),
            ('field_selector', 'fieldSelector'),
            ('label_selector', 'labelSelector'),
            ('limit', 'limit'),
            ('resource_version', 'resourceVersion'),
            ('resource_version_match', 'resourceVersionMatch'),
            ('timeout_seconds', 'timeoutSeconds'),
            ('watch', 'watch')):
        if local_var_params.get(py_name) is not None:
            query_params.append((wire_name, local_var_params[py_name]))

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),  # noqa: E501
    }

    form_params = []
    local_var_files = {}
    body_params = None

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machines', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='IoXK8sClusterV1alpha4MachineList',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def list_namespaced_machine_deployment(self, namespace, **kwargs):  # noqa: E501
    """List objects of kind MachineDeployment in the given namespace.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, pass async_req=True and call get() on the
    returned thread:

    >>> thread = api.list_namespaced_machine_deployment(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool allow_watch_bookmarks: request BOOKMARK watch events; servers may ignore this flag.
    :param str _continue: continuation token from a previous paginated list result.
    :param str field_selector: restrict the returned objects by their fields. Defaults to everything.
    :param str label_selector: restrict the returned objects by their labels. Defaults to everything.
    :param int limit: maximum number of responses to return for a list call.
    :param str resource_version: resourceVersion constraint for the request. Defaults to unset.
    :param str resource_version_match: how resourceVersion is applied to list calls. Defaults to unset.
    :param int timeout_seconds: timeout for the list/watch call.
    :param bool watch: watch for changes and return them as a stream of notifications.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: IoXK8sClusterV1alpha4MachineDeploymentList
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, requesting only the
    # deserialized body (no status code / headers tuple).
    kwargs['_return_http_data_only'] = True
    return self.list_namespaced_machine_deployment_with_http_info(namespace, **kwargs)  # noqa: E501
def list_namespaced_machine_deployment_with_http_info(self, namespace, **kwargs):  # noqa: E501
    """List objects of kind MachineDeployment in the given namespace (full response).  # noqa: E501

    Returns the deserialized body together with the HTTP status code and
    response headers. Synchronous by default; pass async_req=True for an
    asynchronous request:

    >>> thread = api.list_namespaced_machine_deployment_with_http_info(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool allow_watch_bookmarks: request BOOKMARK watch events; servers may ignore this flag.
    :param str _continue: continuation token from a previous paginated list result.
    :param str field_selector: restrict the returned objects by their fields. Defaults to everything.
    :param str label_selector: restrict the returned objects by their labels. Defaults to everything.
    :param int limit: maximum number of responses to return for a list call.
    :param str resource_version: resourceVersion constraint for the request. Defaults to unset.
    :param str resource_version_match: how resourceVersion is applied to list calls. Defaults to unset.
    :param int timeout_seconds: timeout for the list/watch call.
    :param bool watch: watch for changes and return them as a stream of notifications.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(IoXK8sClusterV1alpha4MachineDeploymentList, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Capture the call arguments (self, namespace, kwargs) up front.
    local_var_params = locals()

    # Accepted keyword arguments: API query options plus client options.
    all_params = [
        'namespace',
        'pretty',
        'allow_watch_bookmarks',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'resource_version',
        'resource_version_match',
        'timeout_seconds',
        'watch',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]

    # Reject unknown keywords; flatten the known ones into local_var_params.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_namespaced_machine_deployment" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and local_var_params.get('namespace') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_machine_deployment`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Emit each supplied, non-None query option under its wire name,
    # preserving the generator's fixed ordering.
    query_params = []
    for py_name, wire_name in (
            ('pretty', 'pretty'),
            ('allow_watch_bookmarks', 'allowWatchBookmarks'),
            ('_continue', 'continue'),
            ('field_selector', 'fieldSelector'),
            ('label_selector', 'labelSelector'),
            ('limit', 'limit'),
            ('resource_version', 'resourceVersion'),
            ('resource_version_match', 'resourceVersionMatch'),
            ('timeout_seconds', 'timeoutSeconds'),
            ('watch', 'watch')):
        if local_var_params.get(py_name) is not None:
            query_params.append((wire_name, local_var_params[py_name]))

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),  # noqa: E501
    }

    form_params = []
    local_var_files = {}
    body_params = None

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinedeployments', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='IoXK8sClusterV1alpha4MachineDeploymentList',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def list_namespaced_machine_health_check(self, namespace, **kwargs): # noqa: E501
"""list_namespaced_machine_health_check # noqa: E501
list objects of kind MachineHealthCheck # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_machine_health_check(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoXK8sClusterV1alpha4MachineHealthCheckList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_namespaced_machine_health_check_with_http_info(namespace, **kwargs) # noqa: E501
def list_namespaced_machine_health_check_with_http_info(self, namespace, **kwargs): # noqa: E501
"""list_namespaced_machine_health_check # noqa: E501
list objects of kind MachineHealthCheck # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_machine_health_check_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoXK8sClusterV1alpha4MachineHealthCheckList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'pretty',
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'resource_version',
'resource_version_match',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_machine_health_check" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_machine_health_check`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
query_params.append(('watch', local_var_params['watch'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinehealthchecks', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoXK8sClusterV1alpha4MachineHealthCheckList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def list_namespaced_machine_pool(self, namespace, **kwargs): # noqa: E501
"""list_namespaced_machine_pool # noqa: E501
list objects of kind MachinePool # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_machine_pool(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoXK8sClusterV1alpha4MachinePoolList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_namespaced_machine_pool_with_http_info(namespace, **kwargs) # noqa: E501
def list_namespaced_machine_pool_with_http_info(self, namespace, **kwargs): # noqa: E501
"""list_namespaced_machine_pool # noqa: E501
list objects of kind MachinePool # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_machine_pool_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoXK8sClusterV1alpha4MachinePoolList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'pretty',
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'resource_version',
'resource_version_match',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_machine_pool" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_machine_pool`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
query_params.append(('watch', local_var_params['watch'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinepools', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoXK8sClusterV1alpha4MachinePoolList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def list_namespaced_machine_set(self, namespace, **kwargs):  # noqa: E501
    """list_namespaced_machine_set  # noqa: E501

    List objects of kind MachineSet.

    Synchronous by default; pass async_req=True to get a request thread
    instead of the result:

    >>> thread = api.list_namespaced_machine_set(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: pretty-print the output when 'true'
    :param bool allow_watch_bookmarks: request BOOKMARK watch events (servers may ignore this)
    :param str _continue: continuation token from a previous paged list result
    :param str field_selector: restrict the returned objects by their fields
    :param str label_selector: restrict the returned objects by their labels
    :param int limit: maximum number of responses to return for a list call
    :param str resource_version: resourceVersion constraint for the request
    :param str resource_version_match: how resourceVersion is applied to the list call
    :param int timeout_seconds: timeout for the list/watch call
    :param bool watch: stream add/update/remove notifications for the resources
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding it. Default is True.
    :param _request_timeout: total request timeout, or a (connect, read) tuple
    :return: IoXK8sClusterV1alpha4MachineSetList
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, keeping only the body.
    kwargs['_return_http_data_only'] = True
    return self.list_namespaced_machine_set_with_http_info(namespace, **kwargs)  # noqa: E501
def list_namespaced_machine_set_with_http_info(self, namespace, **kwargs):  # noqa: E501
    """list_namespaced_machine_set  # noqa: E501

    List objects of kind MachineSet, returning the full HTTP response info.

    Synchronous by default; pass async_req=True to get a request thread:

    >>> thread = api.list_namespaced_machine_set_with_http_info(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: pretty-print the output when 'true'
    :param bool allow_watch_bookmarks: request BOOKMARK watch events (servers may ignore this)
    :param str _continue: continuation token from a previous paged list result
    :param str field_selector: restrict the returned objects by their fields
    :param str label_selector: restrict the returned objects by their labels
    :param int limit: maximum number of responses to return for a list call
    :param str resource_version: resourceVersion constraint for the request
    :param str resource_version_match: how resourceVersion is applied to the list call
    :param int timeout_seconds: timeout for the list/watch call
    :param bool watch: stream add/update/remove notifications for the resources
    :param _return_http_data_only: response data without status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding it. Default is True.
    :param _request_timeout: total request timeout, or a (connect, read) tuple
    :return: tuple(IoXK8sClusterV1alpha4MachineSetList, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Start from the explicit argument, then fold in keyword arguments
    # after validating them against the accepted parameter names.
    local_var_params = {'namespace': namespace}
    accepted = {
        'namespace',
        'pretty',
        'allow_watch_bookmarks',
        '_continue',
        'field_selector',
        'label_selector',
        'limit',
        'resource_version',
        'resource_version_match',
        'timeout_seconds',
        'watch',
        # generic request options
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    }
    for key, val in six.iteritems(kwargs):
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_namespaced_machine_set" % key
            )
        local_var_params[key] = val

    # 'namespace' is required when client-side validation is enabled.
    if self.api_client.client_side_validation and local_var_params.get('namespace') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_machine_set`")  # noqa: E501

    collection_formats = {}

    path_params = {'namespace': local_var_params['namespace']}

    # Map python parameter names to their wire (query string) names in the
    # order the API expects, skipping parameters that were not supplied.
    query_param_map = [
        ('pretty', 'pretty'),
        ('allow_watch_bookmarks', 'allowWatchBookmarks'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('resource_version', 'resourceVersion'),
        ('resource_version_match', 'resourceVersionMatch'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    ]
    query_params = [
        (wire_name, local_var_params[py_name])
        for py_name, wire_name in query_param_map
        if local_var_params.get(py_name) is not None
    ]

    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),
    }

    form_params = []
    local_var_files = {}
    body_params = None

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinesets', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='IoXK8sClusterV1alpha4MachineSetList',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def patch_namespaced_cluster(self, name, namespace, body, **kwargs):  # noqa: E501
    """patch_namespaced_cluster  # noqa: E501

    Partially update the specified Cluster.

    Synchronous by default; pass async_req=True to get a request thread
    instead of the result:

    >>> thread = api.patch_namespaced_cluster(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Cluster (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: pretty-print the output when 'true'
    :param str dry_run: process the request without persisting modifications
                        (valid value: 'All')
    :param str field_manager: name associated with the actor or entity making
                              these changes (printable, under 128 characters)
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding it. Default is True.
    :param _request_timeout: total request timeout, or a (connect, read) tuple
    :return: IoXK8sClusterV1alpha4Cluster
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, keeping only the body.
    kwargs['_return_http_data_only'] = True
    return self.patch_namespaced_cluster_with_http_info(name, namespace, body, **kwargs)  # noqa: E501
def patch_namespaced_cluster_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """patch_namespaced_cluster  # noqa: E501

    Partially update the specified Cluster, returning full HTTP response info.

    Synchronous by default; pass async_req=True to get a request thread:

    >>> thread = api.patch_namespaced_cluster_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Cluster (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: pretty-print the output when 'true'
    :param str dry_run: process the request without persisting modifications
                        (valid value: 'All')
    :param str field_manager: name associated with the actor or entity making
                              these changes (printable, under 128 characters)
    :param _return_http_data_only: response data without status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding it. Default is True.
    :param _request_timeout: total request timeout, or a (connect, read) tuple
    :return: tuple(IoXK8sClusterV1alpha4Cluster, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Start from the explicit arguments, then fold in keyword arguments
    # after validating them against the accepted parameter names.
    local_var_params = {'name': name, 'namespace': namespace, 'body': body}
    accepted = {
        'name',
        'namespace',
        'body',
        'pretty',
        'dry_run',
        'field_manager',
        # generic request options
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    }
    for key, val in six.iteritems(kwargs):
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_namespaced_cluster" % key
            )
        local_var_params[key] = val

    # Required parameters are checked only when client-side validation is on.
    if self.api_client.client_side_validation:
        for required in ('name', 'namespace', 'body'):
            if local_var_params.get(required) is None:
                raise ApiValueError("Missing the required parameter `%s` when calling `patch_namespaced_cluster`" % required)  # noqa: E501

    collection_formats = {}

    path_params = {
        'name': local_var_params['name'],
        'namespace': local_var_params['namespace'],
    }

    # Map python parameter names to their wire (query string) names in the
    # order the API expects, skipping parameters that were not supplied.
    query_param_map = [
        ('pretty', 'pretty'),
        ('dry_run', 'dryRun'),
        ('field_manager', 'fieldManager'),
    ]
    query_params = [
        (wire_name, local_var_params[py_name])
        for py_name, wire_name in query_param_map
        if local_var_params.get(py_name) is not None
    ]

    # HTTP headers `Accept` and `Content-Type`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json-patch+json', 'application/merge-patch+json', 'application/apply-patch+yaml']),  # noqa: E501
    }

    form_params = []
    local_var_files = {}
    body_params = local_var_params['body']

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/clusters/{name}', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='IoXK8sClusterV1alpha4Cluster',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def patch_namespaced_cluster_class(self, name, namespace, body, **kwargs):  # noqa: E501
    """patch_namespaced_cluster_class  # noqa: E501

    Partially update the specified ClusterClass.

    Synchronous by default; pass async_req=True to get a request thread
    instead of the result:

    >>> thread = api.patch_namespaced_cluster_class(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ClusterClass (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: pretty-print the output when 'true'
    :param str dry_run: process the request without persisting modifications
                        (valid value: 'All')
    :param str field_manager: name associated with the actor or entity making
                              these changes (printable, under 128 characters)
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding it. Default is True.
    :param _request_timeout: total request timeout, or a (connect, read) tuple
    :return: IoXK8sClusterV1alpha4ClusterClass
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, keeping only the body.
    kwargs['_return_http_data_only'] = True
    return self.patch_namespaced_cluster_class_with_http_info(name, namespace, body, **kwargs)  # noqa: E501
def patch_namespaced_cluster_class_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """patch_namespaced_cluster_class  # noqa: E501

    Partially update the specified ClusterClass, returning full HTTP response info.

    Synchronous by default; pass async_req=True to get a request thread:

    >>> thread = api.patch_namespaced_cluster_class_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ClusterClass (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: pretty-print the output when 'true'
    :param str dry_run: process the request without persisting modifications
                        (valid value: 'All')
    :param str field_manager: name associated with the actor or entity making
                              these changes (printable, under 128 characters)
    :param _return_http_data_only: response data without status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding it. Default is True.
    :param _request_timeout: total request timeout, or a (connect, read) tuple
    :return: tuple(IoXK8sClusterV1alpha4ClusterClass, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Start from the explicit arguments, then fold in keyword arguments
    # after validating them against the accepted parameter names.
    local_var_params = {'name': name, 'namespace': namespace, 'body': body}
    accepted = {
        'name',
        'namespace',
        'body',
        'pretty',
        'dry_run',
        'field_manager',
        # generic request options
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    }
    for key, val in six.iteritems(kwargs):
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_namespaced_cluster_class" % key
            )
        local_var_params[key] = val

    # Required parameters are checked only when client-side validation is on.
    if self.api_client.client_side_validation:
        for required in ('name', 'namespace', 'body'):
            if local_var_params.get(required) is None:
                raise ApiValueError("Missing the required parameter `%s` when calling `patch_namespaced_cluster_class`" % required)  # noqa: E501

    collection_formats = {}

    path_params = {
        'name': local_var_params['name'],
        'namespace': local_var_params['namespace'],
    }

    # Map python parameter names to their wire (query string) names in the
    # order the API expects, skipping parameters that were not supplied.
    query_param_map = [
        ('pretty', 'pretty'),
        ('dry_run', 'dryRun'),
        ('field_manager', 'fieldManager'),
    ]
    query_params = [
        (wire_name, local_var_params[py_name])
        for py_name, wire_name in query_param_map
        if local_var_params.get(py_name) is not None
    ]

    # HTTP headers `Accept` and `Content-Type`
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json-patch+json', 'application/merge-patch+json', 'application/apply-patch+yaml']),  # noqa: E501
    }

    form_params = []
    local_var_files = {}
    body_params = local_var_params['body']

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/clusterclasses/{name}', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='IoXK8sClusterV1alpha4ClusterClass',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def patch_namespaced_cluster_status(self, name, namespace, body, **kwargs):  # noqa: E501
    """patch_namespaced_cluster_status  # noqa: E501

    Partially update status of the specified Cluster.

    Synchronous by default; pass async_req=True to get a request thread
    instead of the result:

    >>> thread = api.patch_namespaced_cluster_status(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Cluster (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: pretty-print the output when 'true'
    :param str dry_run: process the request without persisting modifications
                        (valid value: 'All')
    :param str field_manager: name associated with the actor or entity making
                              these changes (printable, under 128 characters)
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding it. Default is True.
    :param _request_timeout: total request timeout, or a (connect, read) tuple
    :return: IoXK8sClusterV1alpha4Cluster
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, keeping only the body.
    kwargs['_return_http_data_only'] = True
    return self.patch_namespaced_cluster_status_with_http_info(name, namespace, body, **kwargs)  # noqa: E501
def patch_namespaced_cluster_status_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """Partially update status of the specified Cluster (full response).  # noqa: E501

    Same request as ``patch_namespaced_cluster_status`` but returns the
    deserialized body together with the HTTP status code and headers.

    >>> thread = api.patch_namespaced_cluster_status_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Cluster (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive results in an error response with no further processing. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity making these changes. It must be at most 128 characters long and contain only printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object is
                             returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: request timeout: a single number for a total
                             timeout, or a (connection, read) tuple.
    :return: tuple(IoXK8sClusterV1alpha4Cluster, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # API query parameters plus the transport-control keywords accepted
    # by every generated endpoint.
    accepted = frozenset([
        'name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager',
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    ])
    params = {'name': name, 'namespace': namespace, 'body': body}
    for key, val in kwargs.items():
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_namespaced_cluster_status" % key
            )
        params[key] = val

    # The three positional parameters are required by the API schema.
    if self.api_client.client_side_validation:
        if params.get('name') is None:
            raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_cluster_status`")  # noqa: E501
        if params.get('namespace') is None:
            raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_cluster_status`")  # noqa: E501
        if params.get('body') is None:
            raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_cluster_status`")  # noqa: E501

    path_params = {'name': params['name'], 'namespace': params['namespace']}
    # python attribute -> wire name for the optional query-string entries;
    # only parameters the caller actually supplied are sent.
    query_params = [
        (wire, params[attr])
        for attr, wire in (('pretty', 'pretty'),
                           ('dry_run', 'dryRun'),
                           ('field_manager', 'fieldManager'))
        if params.get(attr) is not None
    ]
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json-patch+json', 'application/merge-patch+json', 'application/apply-patch+yaml']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/clusters/{name}/status', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=params['body'],
        post_params=[],
        files={},
        response_type='IoXK8sClusterV1alpha4Cluster',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def patch_namespaced_machine(self, name, namespace, body, **kwargs):  # noqa: E501
    """Partially update the specified Machine.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive the request
    thread instead of the deserialized object.

    >>> thread = api.patch_namespaced_machine(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Machine (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive results in an error response with no further processing. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity making these changes. It must be at most 128 characters long and contain only printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param _preload_content: if False, the urllib3.HTTPResponse object is
                             returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: request timeout: a single number for a total
                             timeout, or a (connection, read) tuple.
    :return: IoXK8sClusterV1alpha4Machine
             If the method is called asynchronously,
             returns the request thread.
    """
    # Force the data-only return shape; callers who need the status code
    # and headers should use the *_with_http_info variant directly.
    kwargs = dict(kwargs, _return_http_data_only=True)
    return self.patch_namespaced_machine_with_http_info(
        name, namespace, body, **kwargs)  # noqa: E501
def patch_namespaced_machine_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """Partially update the specified Machine (full response).  # noqa: E501

    Same request as ``patch_namespaced_machine`` but returns the
    deserialized body together with the HTTP status code and headers.

    >>> thread = api.patch_namespaced_machine_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Machine (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive results in an error response with no further processing. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity making these changes. It must be at most 128 characters long and contain only printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object is
                             returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: request timeout: a single number for a total
                             timeout, or a (connection, read) tuple.
    :return: tuple(IoXK8sClusterV1alpha4Machine, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # API query parameters plus the transport-control keywords accepted
    # by every generated endpoint.
    accepted = frozenset([
        'name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager',
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    ])
    params = {'name': name, 'namespace': namespace, 'body': body}
    for key, val in kwargs.items():
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_namespaced_machine" % key
            )
        params[key] = val

    # The three positional parameters are required by the API schema.
    if self.api_client.client_side_validation:
        if params.get('name') is None:
            raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_machine`")  # noqa: E501
        if params.get('namespace') is None:
            raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_machine`")  # noqa: E501
        if params.get('body') is None:
            raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_machine`")  # noqa: E501

    path_params = {'name': params['name'], 'namespace': params['namespace']}
    # python attribute -> wire name for the optional query-string entries;
    # only parameters the caller actually supplied are sent.
    query_params = [
        (wire, params[attr])
        for attr, wire in (('pretty', 'pretty'),
                           ('dry_run', 'dryRun'),
                           ('field_manager', 'fieldManager'))
        if params.get(attr) is not None
    ]
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json-patch+json', 'application/merge-patch+json', 'application/apply-patch+yaml']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machines/{name}', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=params['body'],
        post_params=[],
        files={},
        response_type='IoXK8sClusterV1alpha4Machine',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def patch_namespaced_machine_deployment(self, name, namespace, body, **kwargs):  # noqa: E501
    """Partially update the specified MachineDeployment.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive the request
    thread instead of the deserialized object.

    >>> thread = api.patch_namespaced_machine_deployment(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineDeployment (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive results in an error response with no further processing. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity making these changes. It must be at most 128 characters long and contain only printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param _preload_content: if False, the urllib3.HTTPResponse object is
                             returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: request timeout: a single number for a total
                             timeout, or a (connection, read) tuple.
    :return: IoXK8sClusterV1alpha4MachineDeployment
             If the method is called asynchronously,
             returns the request thread.
    """
    # Force the data-only return shape; callers who need the status code
    # and headers should use the *_with_http_info variant directly.
    kwargs = dict(kwargs, _return_http_data_only=True)
    return self.patch_namespaced_machine_deployment_with_http_info(
        name, namespace, body, **kwargs)  # noqa: E501
def patch_namespaced_machine_deployment_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """Partially update the specified MachineDeployment (full response).  # noqa: E501

    Same request as ``patch_namespaced_machine_deployment`` but returns the
    deserialized body together with the HTTP status code and headers.

    >>> thread = api.patch_namespaced_machine_deployment_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineDeployment (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive results in an error response with no further processing. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity making these changes. It must be at most 128 characters long and contain only printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object is
                             returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: request timeout: a single number for a total
                             timeout, or a (connection, read) tuple.
    :return: tuple(IoXK8sClusterV1alpha4MachineDeployment, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # API query parameters plus the transport-control keywords accepted
    # by every generated endpoint.
    accepted = frozenset([
        'name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager',
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    ])
    params = {'name': name, 'namespace': namespace, 'body': body}
    for key, val in kwargs.items():
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_namespaced_machine_deployment" % key
            )
        params[key] = val

    # The three positional parameters are required by the API schema.
    if self.api_client.client_side_validation:
        if params.get('name') is None:
            raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_machine_deployment`")  # noqa: E501
        if params.get('namespace') is None:
            raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_machine_deployment`")  # noqa: E501
        if params.get('body') is None:
            raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_machine_deployment`")  # noqa: E501

    path_params = {'name': params['name'], 'namespace': params['namespace']}
    # python attribute -> wire name for the optional query-string entries;
    # only parameters the caller actually supplied are sent.
    query_params = [
        (wire, params[attr])
        for attr, wire in (('pretty', 'pretty'),
                           ('dry_run', 'dryRun'),
                           ('field_manager', 'fieldManager'))
        if params.get(attr) is not None
    ]
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json-patch+json', 'application/merge-patch+json', 'application/apply-patch+yaml']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinedeployments/{name}', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=params['body'],
        post_params=[],
        files={},
        response_type='IoXK8sClusterV1alpha4MachineDeployment',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def patch_namespaced_machine_deployment_scale(self, name, namespace, body, **kwargs):  # noqa: E501
    """Partially update scale of the specified MachineDeployment.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive the request
    thread instead of the deserialized object.

    >>> thread = api.patch_namespaced_machine_deployment_scale(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineDeployment (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive results in an error response with no further processing. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity making these changes. It must be at most 128 characters long and contain only printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param _preload_content: if False, the urllib3.HTTPResponse object is
                             returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: request timeout: a single number for a total
                             timeout, or a (connection, read) tuple.
    :return: V1ScaleV2
             If the method is called asynchronously,
             returns the request thread.
    """
    # Force the data-only return shape; callers who need the status code
    # and headers should use the *_with_http_info variant directly.
    kwargs = dict(kwargs, _return_http_data_only=True)
    return self.patch_namespaced_machine_deployment_scale_with_http_info(
        name, namespace, body, **kwargs)  # noqa: E501
def patch_namespaced_machine_deployment_scale_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """Partially update scale of the specified MachineDeployment (full response).  # noqa: E501

    Same request as ``patch_namespaced_machine_deployment_scale`` but
    returns the deserialized body together with the HTTP status code and
    headers.

    >>> thread = api.patch_namespaced_machine_deployment_scale_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineDeployment (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive results in an error response with no further processing. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity making these changes. It must be at most 128 characters long and contain only printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object is
                             returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: request timeout: a single number for a total
                             timeout, or a (connection, read) tuple.
    :return: tuple(V1ScaleV2, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # API query parameters plus the transport-control keywords accepted
    # by every generated endpoint.
    accepted = frozenset([
        'name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager',
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    ])
    params = {'name': name, 'namespace': namespace, 'body': body}
    for key, val in kwargs.items():
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_namespaced_machine_deployment_scale" % key
            )
        params[key] = val

    # The three positional parameters are required by the API schema.
    if self.api_client.client_side_validation:
        if params.get('name') is None:
            raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_machine_deployment_scale`")  # noqa: E501
        if params.get('namespace') is None:
            raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_machine_deployment_scale`")  # noqa: E501
        if params.get('body') is None:
            raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_machine_deployment_scale`")  # noqa: E501

    path_params = {'name': params['name'], 'namespace': params['namespace']}
    # python attribute -> wire name for the optional query-string entries;
    # only parameters the caller actually supplied are sent.
    query_params = [
        (wire, params[attr])
        for attr, wire in (('pretty', 'pretty'),
                           ('dry_run', 'dryRun'),
                           ('field_manager', 'fieldManager'))
        if params.get(attr) is not None
    ]
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json-patch+json', 'application/merge-patch+json', 'application/apply-patch+yaml']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinedeployments/{name}/scale', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=params['body'],
        post_params=[],
        files={},
        response_type='V1ScaleV2',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def patch_namespaced_machine_deployment_status(self, name, namespace, body, **kwargs):  # noqa: E501
    """Partially update status of the specified MachineDeployment.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive the request
    thread instead of the deserialized object.

    >>> thread = api.patch_namespaced_machine_deployment_status(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineDeployment (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive results in an error response with no further processing. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity making these changes. It must be at most 128 characters long and contain only printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param _preload_content: if False, the urllib3.HTTPResponse object is
                             returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: request timeout: a single number for a total
                             timeout, or a (connection, read) tuple.
    :return: IoXK8sClusterV1alpha4MachineDeployment
             If the method is called asynchronously,
             returns the request thread.
    """
    # Force the data-only return shape; callers who need the status code
    # and headers should use the *_with_http_info variant directly.
    kwargs = dict(kwargs, _return_http_data_only=True)
    return self.patch_namespaced_machine_deployment_status_with_http_info(
        name, namespace, body, **kwargs)  # noqa: E501
def patch_namespaced_machine_deployment_status_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """Partially update status of the specified MachineDeployment (full response).  # noqa: E501

    Same request as ``patch_namespaced_machine_deployment_status`` but
    returns the deserialized body together with the HTTP status code and
    headers.

    >>> thread = api.patch_namespaced_machine_deployment_status_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineDeployment (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive results in an error response with no further processing. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity making these changes. It must be at most 128 characters long and contain only printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object is
                             returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: request timeout: a single number for a total
                             timeout, or a (connection, read) tuple.
    :return: tuple(IoXK8sClusterV1alpha4MachineDeployment, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # API query parameters plus the transport-control keywords accepted
    # by every generated endpoint.
    accepted = frozenset([
        'name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager',
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    ])
    params = {'name': name, 'namespace': namespace, 'body': body}
    for key, val in kwargs.items():
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_namespaced_machine_deployment_status" % key
            )
        params[key] = val

    # The three positional parameters are required by the API schema.
    if self.api_client.client_side_validation:
        if params.get('name') is None:
            raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_machine_deployment_status`")  # noqa: E501
        if params.get('namespace') is None:
            raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_machine_deployment_status`")  # noqa: E501
        if params.get('body') is None:
            raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_machine_deployment_status`")  # noqa: E501

    path_params = {'name': params['name'], 'namespace': params['namespace']}
    # python attribute -> wire name for the optional query-string entries;
    # only parameters the caller actually supplied are sent.
    query_params = [
        (wire, params[attr])
        for attr, wire in (('pretty', 'pretty'),
                           ('dry_run', 'dryRun'),
                           ('field_manager', 'fieldManager'))
        if params.get(attr) is not None
    ]
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json-patch+json', 'application/merge-patch+json', 'application/apply-patch+yaml']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinedeployments/{name}/status', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=params['body'],
        post_params=[],
        files={},
        response_type='IoXK8sClusterV1alpha4MachineDeployment',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def patch_namespaced_machine_health_check(self, name, namespace, body, **kwargs):  # noqa: E501
    """Partially update the specified MachineHealthCheck.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive a request
    thread instead and obtain the result with ``thread.get()``.

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineHealthCheck (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, modifications are validated but not persisted. Valid value: "All".
    :param str field_manager: name associated with the actor making these changes; must be under 128 printable characters.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding. Default True.
    :param _request_timeout: total-request timeout, or a (connection, read) tuple.
    :return: IoXK8sClusterV1alpha4MachineHealthCheck, or the request
        thread when called asynchronously.
    """
    # Callers of this convenience wrapper want only the deserialized
    # object, so force the data-only flag before delegating.
    kwargs['_return_http_data_only'] = True
    delegate = self.patch_namespaced_machine_health_check_with_http_info
    return delegate(name, namespace, body, **kwargs)  # noqa: E501
def patch_namespaced_machine_health_check_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """Partially update the specified MachineHealthCheck.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive a request
    thread instead and obtain the result with ``thread.get()``.

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineHealthCheck (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, modifications are validated but not persisted. Valid value: "All".
    :param str field_manager: name associated with the actor making these changes; must be under 128 printable characters.
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding. Default True.
    :param _request_timeout: total-request timeout, or a (connection, read) tuple.
    :return: tuple(IoXK8sClusterV1alpha4MachineHealthCheck, status_code(int), headers(HTTPHeaderDict)),
        or the request thread when called asynchronously.
    """
    local_var_params = locals()

    # Operation parameters plus the client-level options every generated
    # method accepts.
    all_params = [
        'name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager',
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    ]

    # Reject unknown keyword arguments, then fold the known ones into the
    # flat parameter mapping.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_namespaced_machine_health_check" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # Client-side presence checks for the required parameters.
    if self.api_client.client_side_validation:
        for required in ('name', 'namespace', 'body'):
            if local_var_params.get(required) is None:
                raise ApiValueError(
                    "Missing the required parameter `%s` when calling "
                    "`patch_namespaced_machine_health_check`" % required
                )

    collection_formats = {}

    path_params = {
        param: local_var_params[param]
        for param in ('name', 'namespace')
        if param in local_var_params
    }

    # Map python_name -> wireName for the optional query-string entries.
    query_params = [
        (wire, local_var_params[py])
        for py, wire in (
            ('pretty', 'pretty'),
            ('dry_run', 'dryRun'),
            ('field_manager', 'fieldManager'),
        )
        if local_var_params.get(py) is not None
    ]

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json-patch+json', 'application/merge-patch+json',
             'application/apply-patch+yaml']),  # noqa: E501
    }

    form_params = []
    local_var_files = {}
    body_params = local_var_params.get('body')

    # This endpoint is protected by bearer-token auth only.
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinehealthchecks/{name}', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='IoXK8sClusterV1alpha4MachineHealthCheck',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def patch_namespaced_machine_health_check_status(self, name, namespace, body, **kwargs):  # noqa: E501
    """Partially update status of the specified MachineHealthCheck.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive a request
    thread instead and obtain the result with ``thread.get()``.

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineHealthCheck (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, modifications are validated but not persisted. Valid value: "All".
    :param str field_manager: name associated with the actor making these changes; must be under 128 printable characters.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding. Default True.
    :param _request_timeout: total-request timeout, or a (connection, read) tuple.
    :return: IoXK8sClusterV1alpha4MachineHealthCheck, or the request
        thread when called asynchronously.
    """
    # Callers of this convenience wrapper want only the deserialized
    # object, so force the data-only flag before delegating.
    kwargs['_return_http_data_only'] = True
    delegate = self.patch_namespaced_machine_health_check_status_with_http_info
    return delegate(name, namespace, body, **kwargs)  # noqa: E501
def patch_namespaced_machine_health_check_status_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """Partially update status of the specified MachineHealthCheck.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive a request
    thread instead and obtain the result with ``thread.get()``.

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineHealthCheck (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, modifications are validated but not persisted. Valid value: "All".
    :param str field_manager: name associated with the actor making these changes; must be under 128 printable characters.
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding. Default True.
    :param _request_timeout: total-request timeout, or a (connection, read) tuple.
    :return: tuple(IoXK8sClusterV1alpha4MachineHealthCheck, status_code(int), headers(HTTPHeaderDict)),
        or the request thread when called asynchronously.
    """
    local_var_params = locals()

    # Operation parameters plus the client-level options every generated
    # method accepts.
    all_params = [
        'name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager',
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    ]

    # Reject unknown keyword arguments, then fold the known ones into the
    # flat parameter mapping.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_namespaced_machine_health_check_status" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # Client-side presence checks for the required parameters.
    if self.api_client.client_side_validation:
        for required in ('name', 'namespace', 'body'):
            if local_var_params.get(required) is None:
                raise ApiValueError(
                    "Missing the required parameter `%s` when calling "
                    "`patch_namespaced_machine_health_check_status`" % required
                )

    collection_formats = {}

    path_params = {
        param: local_var_params[param]
        for param in ('name', 'namespace')
        if param in local_var_params
    }

    # Map python_name -> wireName for the optional query-string entries.
    query_params = [
        (wire, local_var_params[py])
        for py, wire in (
            ('pretty', 'pretty'),
            ('dry_run', 'dryRun'),
            ('field_manager', 'fieldManager'),
        )
        if local_var_params.get(py) is not None
    ]

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json-patch+json', 'application/merge-patch+json',
             'application/apply-patch+yaml']),  # noqa: E501
    }

    form_params = []
    local_var_files = {}
    body_params = local_var_params.get('body')

    # This endpoint is protected by bearer-token auth only.
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinehealthchecks/{name}/status', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='IoXK8sClusterV1alpha4MachineHealthCheck',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def patch_namespaced_machine_pool(self, name, namespace, body, **kwargs):  # noqa: E501
    """Partially update the specified MachinePool.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive a request
    thread instead and obtain the result with ``thread.get()``.

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachinePool (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, modifications are validated but not persisted. Valid value: "All".
    :param str field_manager: name associated with the actor making these changes; must be under 128 printable characters.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding. Default True.
    :param _request_timeout: total-request timeout, or a (connection, read) tuple.
    :return: IoXK8sClusterV1alpha4MachinePool, or the request thread when
        called asynchronously.
    """
    # Callers of this convenience wrapper want only the deserialized
    # object, so force the data-only flag before delegating.
    kwargs['_return_http_data_only'] = True
    delegate = self.patch_namespaced_machine_pool_with_http_info
    return delegate(name, namespace, body, **kwargs)  # noqa: E501
def patch_namespaced_machine_pool_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """Partially update the specified MachinePool.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive a request
    thread instead and obtain the result with ``thread.get()``.

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachinePool (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, modifications are validated but not persisted. Valid value: "All".
    :param str field_manager: name associated with the actor making these changes; must be under 128 printable characters.
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding. Default True.
    :param _request_timeout: total-request timeout, or a (connection, read) tuple.
    :return: tuple(IoXK8sClusterV1alpha4MachinePool, status_code(int), headers(HTTPHeaderDict)),
        or the request thread when called asynchronously.
    """
    local_var_params = locals()

    # Operation parameters plus the client-level options every generated
    # method accepts.
    all_params = [
        'name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager',
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    ]

    # Reject unknown keyword arguments, then fold the known ones into the
    # flat parameter mapping.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_namespaced_machine_pool" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # Client-side presence checks for the required parameters.
    if self.api_client.client_side_validation:
        for required in ('name', 'namespace', 'body'):
            if local_var_params.get(required) is None:
                raise ApiValueError(
                    "Missing the required parameter `%s` when calling "
                    "`patch_namespaced_machine_pool`" % required
                )

    collection_formats = {}

    path_params = {
        param: local_var_params[param]
        for param in ('name', 'namespace')
        if param in local_var_params
    }

    # Map python_name -> wireName for the optional query-string entries.
    query_params = [
        (wire, local_var_params[py])
        for py, wire in (
            ('pretty', 'pretty'),
            ('dry_run', 'dryRun'),
            ('field_manager', 'fieldManager'),
        )
        if local_var_params.get(py) is not None
    ]

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json-patch+json', 'application/merge-patch+json',
             'application/apply-patch+yaml']),  # noqa: E501
    }

    form_params = []
    local_var_files = {}
    body_params = local_var_params.get('body')

    # This endpoint is protected by bearer-token auth only.
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinepools/{name}', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='IoXK8sClusterV1alpha4MachinePool',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def patch_namespaced_machine_pool_scale(self, name, namespace, body, **kwargs):  # noqa: E501
    """Partially update scale of the specified MachinePool.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive a request
    thread instead and obtain the result with ``thread.get()``.

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachinePool (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, modifications are validated but not persisted. Valid value: "All".
    :param str field_manager: name associated with the actor making these changes; must be under 128 printable characters.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding. Default True.
    :param _request_timeout: total-request timeout, or a (connection, read) tuple.
    :return: V1ScaleV2, or the request thread when called asynchronously.
    """
    # Callers of this convenience wrapper want only the deserialized
    # object, so force the data-only flag before delegating.
    kwargs['_return_http_data_only'] = True
    delegate = self.patch_namespaced_machine_pool_scale_with_http_info
    return delegate(name, namespace, body, **kwargs)  # noqa: E501
def patch_namespaced_machine_pool_scale_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """Partially update scale of the specified MachinePool.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive a request
    thread instead and obtain the result with ``thread.get()``.

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachinePool (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, modifications are validated but not persisted. Valid value: "All".
    :param str field_manager: name associated with the actor making these changes; must be under 128 printable characters.
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding. Default True.
    :param _request_timeout: total-request timeout, or a (connection, read) tuple.
    :return: tuple(V1ScaleV2, status_code(int), headers(HTTPHeaderDict)),
        or the request thread when called asynchronously.
    """
    local_var_params = locals()

    # Operation parameters plus the client-level options every generated
    # method accepts.
    all_params = [
        'name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager',
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    ]

    # Reject unknown keyword arguments, then fold the known ones into the
    # flat parameter mapping.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_namespaced_machine_pool_scale" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # Client-side presence checks for the required parameters.
    if self.api_client.client_side_validation:
        for required in ('name', 'namespace', 'body'):
            if local_var_params.get(required) is None:
                raise ApiValueError(
                    "Missing the required parameter `%s` when calling "
                    "`patch_namespaced_machine_pool_scale`" % required
                )

    collection_formats = {}

    path_params = {
        param: local_var_params[param]
        for param in ('name', 'namespace')
        if param in local_var_params
    }

    # Map python_name -> wireName for the optional query-string entries.
    query_params = [
        (wire, local_var_params[py])
        for py, wire in (
            ('pretty', 'pretty'),
            ('dry_run', 'dryRun'),
            ('field_manager', 'fieldManager'),
        )
        if local_var_params.get(py) is not None
    ]

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json-patch+json', 'application/merge-patch+json',
             'application/apply-patch+yaml']),  # noqa: E501
    }

    form_params = []
    local_var_files = {}
    body_params = local_var_params.get('body')

    # This endpoint is protected by bearer-token auth only.
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinepools/{name}/scale', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1ScaleV2',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def patch_namespaced_machine_pool_status(self, name, namespace, body, **kwargs):  # noqa: E501
    """Partially update status of the specified MachinePool.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive a request
    thread instead and obtain the result with ``thread.get()``.

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachinePool (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, modifications are validated but not persisted. Valid value: "All".
    :param str field_manager: name associated with the actor making these changes; must be under 128 printable characters.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding. Default True.
    :param _request_timeout: total-request timeout, or a (connection, read) tuple.
    :return: IoXK8sClusterV1alpha4MachinePool, or the request thread when
        called asynchronously.
    """
    # Callers of this convenience wrapper want only the deserialized
    # object, so force the data-only flag before delegating.
    kwargs['_return_http_data_only'] = True
    delegate = self.patch_namespaced_machine_pool_status_with_http_info
    return delegate(name, namespace, body, **kwargs)  # noqa: E501
def patch_namespaced_machine_pool_status_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """patch_namespaced_machine_pool_status  # noqa: E501

    Partially update status of the specified MachinePool, returning the
    full (data, status_code, headers) triple.

    Synchronous by default; pass async_req=True for an asynchronous request:

    >>> thread = api.patch_namespaced_machine_pool_status_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachinePool (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: when present, the request is processed but modifications are not persisted; the only valid value is 'All'
    :param str field_manager: name associated with the actor making these changes (printable characters, fewer than 128)
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total timeout for the request, or a (connection, read) tuple
    :return: tuple(IoXK8sClusterV1alpha4MachinePool, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously, returns the request thread.
    """
    # Reject keyword arguments this endpoint does not understand.
    accepted = {
        'name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager',
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    }
    for key in kwargs:
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_namespaced_machine_pool_status" % key
            )

    # Client-side validation of required parameters (same order as generated code).
    if self.api_client.client_side_validation:
        if name is None:
            raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_machine_pool_status`")  # noqa: E501
        if namespace is None:
            raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_machine_pool_status`")  # noqa: E501
        if body is None:
            raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_machine_pool_status`")  # noqa: E501

    path_params = {'name': name, 'namespace': namespace}

    # Optional query parameters, mapped python_name -> wire name; None means "not sent".
    query_params = []
    for py_name, wire_name in (('pretty', 'pretty'),
                               ('dry_run', 'dryRun'),
                               ('field_manager', 'fieldManager')):
        value = kwargs.get(py_name)
        if value is not None:
            query_params.append((wire_name, value))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json-patch+json', 'application/merge-patch+json', 'application/apply-patch+yaml']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinepools/{name}/status', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body,
        post_params=[],
        files={},
        response_type='IoXK8sClusterV1alpha4MachinePool',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def patch_namespaced_machine_set(self, name, namespace, body, **kwargs):  # noqa: E501
    """patch_namespaced_machine_set  # noqa: E501

    Partially update the specified MachineSet.

    Convenience wrapper around
    patch_namespaced_machine_set_with_http_info() that returns only the
    deserialized response data. Synchronous by default; pass
    async_req=True to get a request thread instead:

    >>> thread = api.patch_namespaced_machine_set(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: when present, the request is processed but modifications are not persisted; the only valid value is 'All'
    :param str field_manager: name associated with the actor making these changes (printable characters, fewer than 128)
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total timeout for the request, or a (connection, read) tuple
    :return: IoXK8sClusterV1alpha4MachineSet
             If the method is called asynchronously, returns the request thread.
    """
    # Ask the lower-level call for data only, then delegate.
    kwargs['_return_http_data_only'] = True
    return self.patch_namespaced_machine_set_with_http_info(name, namespace, body, **kwargs)  # noqa: E501
def patch_namespaced_machine_set_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """patch_namespaced_machine_set  # noqa: E501

    Partially update the specified MachineSet, returning the full
    (data, status_code, headers) triple.

    Synchronous by default; pass async_req=True for an asynchronous request:

    >>> thread = api.patch_namespaced_machine_set_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: when present, the request is processed but modifications are not persisted; the only valid value is 'All'
    :param str field_manager: name associated with the actor making these changes (printable characters, fewer than 128)
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total timeout for the request, or a (connection, read) tuple
    :return: tuple(IoXK8sClusterV1alpha4MachineSet, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously, returns the request thread.
    """
    # Reject keyword arguments this endpoint does not understand.
    accepted = {
        'name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager',
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    }
    for key in kwargs:
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_namespaced_machine_set" % key
            )

    # Client-side validation of required parameters (same order as generated code).
    if self.api_client.client_side_validation:
        if name is None:
            raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_machine_set`")  # noqa: E501
        if namespace is None:
            raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_machine_set`")  # noqa: E501
        if body is None:
            raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_machine_set`")  # noqa: E501

    path_params = {'name': name, 'namespace': namespace}

    # Optional query parameters, mapped python_name -> wire name; None means "not sent".
    query_params = []
    for py_name, wire_name in (('pretty', 'pretty'),
                               ('dry_run', 'dryRun'),
                               ('field_manager', 'fieldManager')):
        value = kwargs.get(py_name)
        if value is not None:
            query_params.append((wire_name, value))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json-patch+json', 'application/merge-patch+json', 'application/apply-patch+yaml']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinesets/{name}', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body,
        post_params=[],
        files={},
        response_type='IoXK8sClusterV1alpha4MachineSet',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def patch_namespaced_machine_set_scale(self, name, namespace, body, **kwargs):  # noqa: E501
    """patch_namespaced_machine_set_scale  # noqa: E501

    Partially update scale of the specified MachineSet.

    Convenience wrapper around
    patch_namespaced_machine_set_scale_with_http_info() that returns only
    the deserialized response data. Synchronous by default; pass
    async_req=True to get a request thread instead:

    >>> thread = api.patch_namespaced_machine_set_scale(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: when present, the request is processed but modifications are not persisted; the only valid value is 'All'
    :param str field_manager: name associated with the actor making these changes (printable characters, fewer than 128)
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total timeout for the request, or a (connection, read) tuple
    :return: V1ScaleV2
             If the method is called asynchronously, returns the request thread.
    """
    # Ask the lower-level call for data only, then delegate.
    kwargs['_return_http_data_only'] = True
    return self.patch_namespaced_machine_set_scale_with_http_info(name, namespace, body, **kwargs)  # noqa: E501
def patch_namespaced_machine_set_scale_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """patch_namespaced_machine_set_scale  # noqa: E501

    Partially update scale of the specified MachineSet, returning the full
    (data, status_code, headers) triple.

    Synchronous by default; pass async_req=True for an asynchronous request:

    >>> thread = api.patch_namespaced_machine_set_scale_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: when present, the request is processed but modifications are not persisted; the only valid value is 'All'
    :param str field_manager: name associated with the actor making these changes (printable characters, fewer than 128)
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total timeout for the request, or a (connection, read) tuple
    :return: tuple(V1ScaleV2, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously, returns the request thread.
    """
    # Reject keyword arguments this endpoint does not understand.
    accepted = {
        'name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager',
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    }
    for key in kwargs:
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_namespaced_machine_set_scale" % key
            )

    # Client-side validation of required parameters (same order as generated code).
    if self.api_client.client_side_validation:
        if name is None:
            raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_machine_set_scale`")  # noqa: E501
        if namespace is None:
            raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_machine_set_scale`")  # noqa: E501
        if body is None:
            raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_machine_set_scale`")  # noqa: E501

    path_params = {'name': name, 'namespace': namespace}

    # Optional query parameters, mapped python_name -> wire name; None means "not sent".
    query_params = []
    for py_name, wire_name in (('pretty', 'pretty'),
                               ('dry_run', 'dryRun'),
                               ('field_manager', 'fieldManager')):
        value = kwargs.get(py_name)
        if value is not None:
            query_params.append((wire_name, value))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json-patch+json', 'application/merge-patch+json', 'application/apply-patch+yaml']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinesets/{name}/scale', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body,
        post_params=[],
        files={},
        response_type='V1ScaleV2',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def patch_namespaced_machine_set_status(self, name, namespace, body, **kwargs):  # noqa: E501
    """patch_namespaced_machine_set_status  # noqa: E501

    Partially update status of the specified MachineSet.

    Convenience wrapper around
    patch_namespaced_machine_set_status_with_http_info() that returns only
    the deserialized response data. Synchronous by default; pass
    async_req=True to get a request thread instead:

    >>> thread = api.patch_namespaced_machine_set_status(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: when present, the request is processed but modifications are not persisted; the only valid value is 'All'
    :param str field_manager: name associated with the actor making these changes (printable characters, fewer than 128)
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total timeout for the request, or a (connection, read) tuple
    :return: IoXK8sClusterV1alpha4MachineSet
             If the method is called asynchronously, returns the request thread.
    """
    # Ask the lower-level call for data only, then delegate.
    kwargs['_return_http_data_only'] = True
    return self.patch_namespaced_machine_set_status_with_http_info(name, namespace, body, **kwargs)  # noqa: E501
def patch_namespaced_machine_set_status_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """patch_namespaced_machine_set_status  # noqa: E501

    Partially update status of the specified MachineSet, returning the full
    (data, status_code, headers) triple.

    Synchronous by default; pass async_req=True for an asynchronous request:

    >>> thread = api.patch_namespaced_machine_set_status_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: when present, the request is processed but modifications are not persisted; the only valid value is 'All'
    :param str field_manager: name associated with the actor making these changes (printable characters, fewer than 128)
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total timeout for the request, or a (connection, read) tuple
    :return: tuple(IoXK8sClusterV1alpha4MachineSet, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously, returns the request thread.
    """
    # Reject keyword arguments this endpoint does not understand.
    accepted = {
        'name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager',
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    }
    for key in kwargs:
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_namespaced_machine_set_status" % key
            )

    # Client-side validation of required parameters (same order as generated code).
    if self.api_client.client_side_validation:
        if name is None:
            raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_machine_set_status`")  # noqa: E501
        if namespace is None:
            raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_machine_set_status`")  # noqa: E501
        if body is None:
            raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_machine_set_status`")  # noqa: E501

    path_params = {'name': name, 'namespace': namespace}

    # Optional query parameters, mapped python_name -> wire name; None means "not sent".
    query_params = []
    for py_name, wire_name in (('pretty', 'pretty'),
                               ('dry_run', 'dryRun'),
                               ('field_manager', 'fieldManager')):
        value = kwargs.get(py_name)
        if value is not None:
            query_params.append((wire_name, value))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json-patch+json', 'application/merge-patch+json', 'application/apply-patch+yaml']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinesets/{name}/status', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body,
        post_params=[],
        files={},
        response_type='IoXK8sClusterV1alpha4MachineSet',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def patch_namespaced_machine_status(self, name, namespace, body, **kwargs):  # noqa: E501
    """patch_namespaced_machine_status  # noqa: E501

    Partially update status of the specified Machine.

    Convenience wrapper around
    patch_namespaced_machine_status_with_http_info() that returns only the
    deserialized response data. Synchronous by default; pass
    async_req=True to get a request thread instead:

    >>> thread = api.patch_namespaced_machine_status(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Machine (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: when present, the request is processed but modifications are not persisted; the only valid value is 'All'
    :param str field_manager: name associated with the actor making these changes (printable characters, fewer than 128)
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total timeout for the request, or a (connection, read) tuple
    :return: IoXK8sClusterV1alpha4Machine
             If the method is called asynchronously, returns the request thread.
    """
    # Ask the lower-level call for data only, then delegate.
    kwargs['_return_http_data_only'] = True
    return self.patch_namespaced_machine_status_with_http_info(name, namespace, body, **kwargs)  # noqa: E501
def patch_namespaced_machine_status_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """patch_namespaced_machine_status  # noqa: E501

    Partially update status of the specified Machine, returning the full
    (data, status_code, headers) triple.

    Synchronous by default; pass async_req=True for an asynchronous request:

    >>> thread = api.patch_namespaced_machine_status_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Machine (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: when present, the request is processed but modifications are not persisted; the only valid value is 'All'
    :param str field_manager: name associated with the actor making these changes (printable characters, fewer than 128)
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse without reading/decoding (default True)
    :param _request_timeout: total timeout for the request, or a (connection, read) tuple
    :return: tuple(IoXK8sClusterV1alpha4Machine, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously, returns the request thread.
    """
    # Reject keyword arguments this endpoint does not understand.
    accepted = {
        'name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager',
        'async_req', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    }
    for key in kwargs:
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_namespaced_machine_status" % key
            )

    # Client-side validation of required parameters (same order as generated code).
    if self.api_client.client_side_validation:
        if name is None:
            raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_machine_status`")  # noqa: E501
        if namespace is None:
            raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_machine_status`")  # noqa: E501
        if body is None:
            raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_machine_status`")  # noqa: E501

    path_params = {'name': name, 'namespace': namespace}

    # Optional query parameters, mapped python_name -> wire name; None means "not sent".
    query_params = []
    for py_name, wire_name in (('pretty', 'pretty'),
                               ('dry_run', 'dryRun'),
                               ('field_manager', 'fieldManager')):
        value = kwargs.get(py_name)
        if value is not None:
            query_params.append((wire_name, value))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json-patch+json', 'application/merge-patch+json', 'application/apply-patch+yaml']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machines/{name}/status', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body,
        post_params=[],
        files={},
        response_type='IoXK8sClusterV1alpha4Machine',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),  # noqa: E501
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def read_namespaced_cluster(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_cluster # noqa: E501
read the specified Cluster # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_cluster(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Cluster (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoXK8sClusterV1alpha4Cluster
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_namespaced_cluster_with_http_info(name, namespace, **kwargs) # noqa: E501
def read_namespaced_cluster_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_cluster # noqa: E501
read the specified Cluster # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_cluster_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Cluster (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoXK8sClusterV1alpha4Cluster, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty',
'resource_version'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_cluster" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_cluster`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_cluster`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/clusters/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoXK8sClusterV1alpha4Cluster', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_cluster_class(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_cluster_class # noqa: E501
read the specified ClusterClass # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_cluster_class(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ClusterClass (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoXK8sClusterV1alpha4ClusterClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_namespaced_cluster_class_with_http_info(name, namespace, **kwargs) # noqa: E501
def read_namespaced_cluster_class_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_cluster_class # noqa: E501
read the specified ClusterClass # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_cluster_class_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the ClusterClass (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoXK8sClusterV1alpha4ClusterClass, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty',
'resource_version'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_cluster_class" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_cluster_class`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_cluster_class`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/clusterclasses/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoXK8sClusterV1alpha4ClusterClass', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_cluster_status(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_cluster_status # noqa: E501
read status of the specified Cluster # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_cluster_status(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Cluster (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoXK8sClusterV1alpha4Cluster
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_namespaced_cluster_status_with_http_info(name, namespace, **kwargs) # noqa: E501
def read_namespaced_cluster_status_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_cluster_status # noqa: E501
read status of the specified Cluster # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_cluster_status_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Cluster (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoXK8sClusterV1alpha4Cluster, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty',
'resource_version'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_cluster_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_cluster_status`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_cluster_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/clusters/{name}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoXK8sClusterV1alpha4Cluster', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_machine(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_machine # noqa: E501
read the specified Machine # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_machine(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Machine (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoXK8sClusterV1alpha4Machine
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_namespaced_machine_with_http_info(name, namespace, **kwargs) # noqa: E501
def read_namespaced_machine_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_machine # noqa: E501
read the specified Machine # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_machine_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the Machine (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoXK8sClusterV1alpha4Machine, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty',
'resource_version'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_machine" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_machine`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_machine`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machines/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoXK8sClusterV1alpha4Machine', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_machine_deployment(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_machine_deployment # noqa: E501
read the specified MachineDeployment # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_machine_deployment(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the MachineDeployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: IoXK8sClusterV1alpha4MachineDeployment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_namespaced_machine_deployment_with_http_info(name, namespace, **kwargs) # noqa: E501
def read_namespaced_machine_deployment_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_machine_deployment # noqa: E501
read the specified MachineDeployment # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_machine_deployment_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the MachineDeployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(IoXK8sClusterV1alpha4MachineDeployment, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty',
'resource_version'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_machine_deployment" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_machine_deployment`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_machine_deployment`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinedeployments/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='IoXK8sClusterV1alpha4MachineDeployment', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_machine_deployment_scale(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_machine_deployment_scale # noqa: E501
read scale of the specified MachineDeployment # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_machine_deployment_scale(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the MachineDeployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ScaleV2
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_namespaced_machine_deployment_scale_with_http_info(name, namespace, **kwargs) # noqa: E501
def read_namespaced_machine_deployment_scale_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_machine_deployment_scale # noqa: E501
read scale of the specified MachineDeployment # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_machine_deployment_scale_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the MachineDeployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ScaleV2, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty',
'resource_version'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_machine_deployment_scale" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_machine_deployment_scale`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_machine_deployment_scale`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinedeployments/{name}/scale', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ScaleV2', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_machine_deployment_status(self, name, namespace, **kwargs):  # noqa: E501
    """Read status of the specified MachineDeployment.

    Synchronous by default; pass async_req=True to receive the request
    thread instead of the result.

    >>> thread = api.read_namespaced_machine_deployment_status(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineDeployment (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             object without reading/decoding the body.
                             Default is True.
    :param _request_timeout: total-request timeout (single number) or a
                             (connection, read) timeout tuple.
    :return: IoXK8sClusterV1alpha4MachineDeployment
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, keeping only the
    # deserialized body (status code and headers are dropped).
    data_only_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.read_namespaced_machine_deployment_status_with_http_info(
        name, namespace, **data_only_kwargs)
def read_namespaced_machine_deployment_status_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """read_namespaced_machine_deployment_status  # noqa: E501

    read status of the specified MachineDeployment  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_namespaced_machine_deployment_status_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineDeployment (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(IoXK8sClusterV1alpha4MachineDeployment, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Captured before any other local is bound, so the snapshot holds
    # exactly: self, name, namespace and the raw kwargs dict.
    local_var_params = locals()

    # Endpoint-specific parameters accepted by this API call.
    all_params = [
        'name',
        'namespace',
        'pretty',
        'resource_version'
    ]
    # Client-level options accepted by every generated endpoint method.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten the accepted ones
    # into the parameter dict.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_namespaced_machine_deployment_status" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_machine_deployment_status`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_machine_deployment_status`")  # noqa: E501

    collection_formats = {}  # no collection-typed parameters on this endpoint

    # Values substituted into the {name}/{namespace} URL placeholders.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Optional query-string parameters (note: camelCase on the wire).
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
        query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501

    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None  # GET request: no request body

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinedeployments/{name}/status', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='IoXK8sClusterV1alpha4MachineDeployment',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def read_namespaced_machine_health_check(self, name, namespace, **kwargs):  # noqa: E501
    """Read the specified MachineHealthCheck.

    Synchronous by default; pass async_req=True to receive the request
    thread instead of the result.

    >>> thread = api.read_namespaced_machine_health_check(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineHealthCheck (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             object without reading/decoding the body.
                             Default is True.
    :param _request_timeout: total-request timeout (single number) or a
                             (connection, read) timeout tuple.
    :return: IoXK8sClusterV1alpha4MachineHealthCheck
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, keeping only the
    # deserialized body (status code and headers are dropped).
    data_only_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.read_namespaced_machine_health_check_with_http_info(
        name, namespace, **data_only_kwargs)
def read_namespaced_machine_health_check_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """read_namespaced_machine_health_check  # noqa: E501

    read the specified MachineHealthCheck  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_namespaced_machine_health_check_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineHealthCheck (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(IoXK8sClusterV1alpha4MachineHealthCheck, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Captured before any other local is bound, so the snapshot holds
    # exactly: self, name, namespace and the raw kwargs dict.
    local_var_params = locals()

    # Endpoint-specific parameters accepted by this API call.
    all_params = [
        'name',
        'namespace',
        'pretty',
        'resource_version'
    ]
    # Client-level options accepted by every generated endpoint method.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten the accepted ones
    # into the parameter dict.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_namespaced_machine_health_check" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_machine_health_check`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_machine_health_check`")  # noqa: E501

    collection_formats = {}  # no collection-typed parameters on this endpoint

    # Values substituted into the {name}/{namespace} URL placeholders.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Optional query-string parameters (note: camelCase on the wire).
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
        query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501

    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None  # GET request: no request body

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinehealthchecks/{name}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='IoXK8sClusterV1alpha4MachineHealthCheck',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def read_namespaced_machine_health_check_status(self, name, namespace, **kwargs):  # noqa: E501
    """Read status of the specified MachineHealthCheck.

    Synchronous by default; pass async_req=True to receive the request
    thread instead of the result.

    >>> thread = api.read_namespaced_machine_health_check_status(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineHealthCheck (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             object without reading/decoding the body.
                             Default is True.
    :param _request_timeout: total-request timeout (single number) or a
                             (connection, read) timeout tuple.
    :return: IoXK8sClusterV1alpha4MachineHealthCheck
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, keeping only the
    # deserialized body (status code and headers are dropped).
    data_only_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.read_namespaced_machine_health_check_status_with_http_info(
        name, namespace, **data_only_kwargs)
def read_namespaced_machine_health_check_status_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """read_namespaced_machine_health_check_status  # noqa: E501

    read status of the specified MachineHealthCheck  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_namespaced_machine_health_check_status_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineHealthCheck (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(IoXK8sClusterV1alpha4MachineHealthCheck, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Captured before any other local is bound, so the snapshot holds
    # exactly: self, name, namespace and the raw kwargs dict.
    local_var_params = locals()

    # Endpoint-specific parameters accepted by this API call.
    all_params = [
        'name',
        'namespace',
        'pretty',
        'resource_version'
    ]
    # Client-level options accepted by every generated endpoint method.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten the accepted ones
    # into the parameter dict.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_namespaced_machine_health_check_status" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_machine_health_check_status`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_machine_health_check_status`")  # noqa: E501

    collection_formats = {}  # no collection-typed parameters on this endpoint

    # Values substituted into the {name}/{namespace} URL placeholders.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Optional query-string parameters (note: camelCase on the wire).
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
        query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501

    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None  # GET request: no request body

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinehealthchecks/{name}/status', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='IoXK8sClusterV1alpha4MachineHealthCheck',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def read_namespaced_machine_pool(self, name, namespace, **kwargs):  # noqa: E501
    """Read the specified MachinePool.

    Synchronous by default; pass async_req=True to receive the request
    thread instead of the result.

    >>> thread = api.read_namespaced_machine_pool(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachinePool (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             object without reading/decoding the body.
                             Default is True.
    :param _request_timeout: total-request timeout (single number) or a
                             (connection, read) timeout tuple.
    :return: IoXK8sClusterV1alpha4MachinePool
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, keeping only the
    # deserialized body (status code and headers are dropped).
    data_only_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.read_namespaced_machine_pool_with_http_info(
        name, namespace, **data_only_kwargs)
def read_namespaced_machine_pool_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """read_namespaced_machine_pool  # noqa: E501

    read the specified MachinePool  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_namespaced_machine_pool_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachinePool (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(IoXK8sClusterV1alpha4MachinePool, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Captured before any other local is bound, so the snapshot holds
    # exactly: self, name, namespace and the raw kwargs dict.
    local_var_params = locals()

    # Endpoint-specific parameters accepted by this API call.
    all_params = [
        'name',
        'namespace',
        'pretty',
        'resource_version'
    ]
    # Client-level options accepted by every generated endpoint method.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten the accepted ones
    # into the parameter dict.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_namespaced_machine_pool" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_machine_pool`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_machine_pool`")  # noqa: E501

    collection_formats = {}  # no collection-typed parameters on this endpoint

    # Values substituted into the {name}/{namespace} URL placeholders.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Optional query-string parameters (note: camelCase on the wire).
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
        query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501

    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None  # GET request: no request body

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinepools/{name}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='IoXK8sClusterV1alpha4MachinePool',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def read_namespaced_machine_pool_scale(self, name, namespace, **kwargs):  # noqa: E501
    """Read scale of the specified MachinePool.

    Synchronous by default; pass async_req=True to receive the request
    thread instead of the result.

    >>> thread = api.read_namespaced_machine_pool_scale(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachinePool (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             object without reading/decoding the body.
                             Default is True.
    :param _request_timeout: total-request timeout (single number) or a
                             (connection, read) timeout tuple.
    :return: V1ScaleV2
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, keeping only the
    # deserialized body (status code and headers are dropped).
    data_only_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.read_namespaced_machine_pool_scale_with_http_info(
        name, namespace, **data_only_kwargs)
def read_namespaced_machine_pool_scale_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """read_namespaced_machine_pool_scale  # noqa: E501

    read scale of the specified MachinePool  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_namespaced_machine_pool_scale_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachinePool (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1ScaleV2, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Captured before any other local is bound, so the snapshot holds
    # exactly: self, name, namespace and the raw kwargs dict.
    local_var_params = locals()

    # Endpoint-specific parameters accepted by this API call.
    all_params = [
        'name',
        'namespace',
        'pretty',
        'resource_version'
    ]
    # Client-level options accepted by every generated endpoint method.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten the accepted ones
    # into the parameter dict.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_namespaced_machine_pool_scale" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_machine_pool_scale`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_machine_pool_scale`")  # noqa: E501

    collection_formats = {}  # no collection-typed parameters on this endpoint

    # Values substituted into the {name}/{namespace} URL placeholders.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Optional query-string parameters (note: camelCase on the wire).
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
        query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501

    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None  # GET request: no request body

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinepools/{name}/scale', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1ScaleV2',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def read_namespaced_machine_pool_status(self, name, namespace, **kwargs):  # noqa: E501
    """Read status of the specified MachinePool.

    Synchronous by default; pass async_req=True to receive the request
    thread instead of the result.

    >>> thread = api.read_namespaced_machine_pool_status(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachinePool (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             object without reading/decoding the body.
                             Default is True.
    :param _request_timeout: total-request timeout (single number) or a
                             (connection, read) timeout tuple.
    :return: IoXK8sClusterV1alpha4MachinePool
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, keeping only the
    # deserialized body (status code and headers are dropped).
    data_only_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.read_namespaced_machine_pool_status_with_http_info(
        name, namespace, **data_only_kwargs)
def read_namespaced_machine_pool_status_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """read_namespaced_machine_pool_status  # noqa: E501

    read status of the specified MachinePool  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_namespaced_machine_pool_status_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachinePool (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(IoXK8sClusterV1alpha4MachinePool, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Captured before any other local is bound, so the snapshot holds
    # exactly: self, name, namespace and the raw kwargs dict.
    local_var_params = locals()

    # Endpoint-specific parameters accepted by this API call.
    all_params = [
        'name',
        'namespace',
        'pretty',
        'resource_version'
    ]
    # Client-level options accepted by every generated endpoint method.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten the accepted ones
    # into the parameter dict.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_namespaced_machine_pool_status" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_machine_pool_status`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_machine_pool_status`")  # noqa: E501

    collection_formats = {}  # no collection-typed parameters on this endpoint

    # Values substituted into the {name}/{namespace} URL placeholders.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Optional query-string parameters (note: camelCase on the wire).
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
        query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501

    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None  # GET request: no request body

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinepools/{name}/status', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='IoXK8sClusterV1alpha4MachinePool',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def read_namespaced_machine_set(self, name, namespace, **kwargs):  # noqa: E501
    """Read the specified MachineSet.

    Synchronous by default; pass ``async_req=True`` to get a request
    thread instead:

    >>> thread = api.read_namespaced_machine_set(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: IoXK8sClusterV1alpha4MachineSet
             If the method is called asynchronously,
             returns the request thread.
    """
    # Force the data-only variant: callers of this convenience method get
    # the deserialized object, never the (data, status, headers) tuple.
    call_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.read_namespaced_machine_set_with_http_info(
        name, namespace, **call_kwargs)  # noqa: E501
def read_namespaced_machine_set_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """read_namespaced_machine_set  # noqa: E501

    read the specified MachineSet  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.read_namespaced_machine_set_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(IoXK8sClusterV1alpha4MachineSet, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # locals() must be captured first so it holds exactly the declared
    # arguments (self, name, namespace, kwargs); defining any local above
    # this line would leak it into the parameter dict.
    local_var_params = locals()

    # Endpoint-specific parameters ...
    all_params = [
        'name',
        'namespace',
        'pretty',
        'resource_version'
    ]
    # ... plus the client-level options every generated method accepts.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten kwargs into
    # local_var_params so every parameter is looked up uniformly below.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_namespaced_machine_set" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_machine_set`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_machine_set`")  # noqa: E501

    collection_formats = {}

    # URL templating values for {name} and {namespace}.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Optional query-string parameters (python snake_case -> API camelCase).
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
        query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # GET request: no request body is sent.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinesets/{name}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='IoXK8sClusterV1alpha4MachineSet',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def read_namespaced_machine_set_scale(self, name, namespace, **kwargs):  # noqa: E501
    """Read the scale subresource of the specified MachineSet.

    Synchronous by default; pass ``async_req=True`` to get a request
    thread instead:

    >>> thread = api.read_namespaced_machine_set_scale(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: V1ScaleV2
             If the method is called asynchronously,
             returns the request thread.
    """
    # Force the data-only variant: callers of this convenience method get
    # the deserialized object, never the (data, status, headers) tuple.
    call_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.read_namespaced_machine_set_scale_with_http_info(
        name, namespace, **call_kwargs)  # noqa: E501
def read_namespaced_machine_set_scale_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """read_namespaced_machine_set_scale  # noqa: E501

    read scale of the specified MachineSet  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.read_namespaced_machine_set_scale_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1ScaleV2, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # locals() must be captured first so it holds exactly the declared
    # arguments (self, name, namespace, kwargs); defining any local above
    # this line would leak it into the parameter dict.
    local_var_params = locals()

    # Endpoint-specific parameters ...
    all_params = [
        'name',
        'namespace',
        'pretty',
        'resource_version'
    ]
    # ... plus the client-level options every generated method accepts.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten kwargs into
    # local_var_params so every parameter is looked up uniformly below.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_namespaced_machine_set_scale" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_machine_set_scale`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_machine_set_scale`")  # noqa: E501

    collection_formats = {}

    # URL templating values for {name} and {namespace}.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Optional query-string parameters (python snake_case -> API camelCase).
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
        query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # GET request: no request body is sent.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinesets/{name}/scale', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1ScaleV2',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def read_namespaced_machine_set_status(self, name, namespace, **kwargs):  # noqa: E501
    """Read the status subresource of the specified MachineSet.

    Synchronous by default; pass ``async_req=True`` to get a request
    thread instead:

    >>> thread = api.read_namespaced_machine_set_status(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: IoXK8sClusterV1alpha4MachineSet
             If the method is called asynchronously,
             returns the request thread.
    """
    # Force the data-only variant: callers of this convenience method get
    # the deserialized object, never the (data, status, headers) tuple.
    call_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.read_namespaced_machine_set_status_with_http_info(
        name, namespace, **call_kwargs)  # noqa: E501
def read_namespaced_machine_set_status_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """read_namespaced_machine_set_status  # noqa: E501

    read status of the specified MachineSet  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.read_namespaced_machine_set_status_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(IoXK8sClusterV1alpha4MachineSet, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # locals() must be captured first so it holds exactly the declared
    # arguments (self, name, namespace, kwargs); defining any local above
    # this line would leak it into the parameter dict.
    local_var_params = locals()

    # Endpoint-specific parameters ...
    all_params = [
        'name',
        'namespace',
        'pretty',
        'resource_version'
    ]
    # ... plus the client-level options every generated method accepts.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten kwargs into
    # local_var_params so every parameter is looked up uniformly below.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_namespaced_machine_set_status" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_machine_set_status`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_machine_set_status`")  # noqa: E501

    collection_formats = {}

    # URL templating values for {name} and {namespace}.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Optional query-string parameters (python snake_case -> API camelCase).
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
        query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # GET request: no request body is sent.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinesets/{name}/status', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='IoXK8sClusterV1alpha4MachineSet',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def read_namespaced_machine_status(self, name, namespace, **kwargs):  # noqa: E501
    """Read the status subresource of the specified Machine.

    Synchronous by default; pass ``async_req=True`` to get a request
    thread instead:

    >>> thread = api.read_namespaced_machine_status(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Machine (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: IoXK8sClusterV1alpha4Machine
             If the method is called asynchronously,
             returns the request thread.
    """
    # Force the data-only variant: callers of this convenience method get
    # the deserialized object, never the (data, status, headers) tuple.
    call_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.read_namespaced_machine_status_with_http_info(
        name, namespace, **call_kwargs)  # noqa: E501
def read_namespaced_machine_status_with_http_info(self, name, namespace, **kwargs):  # noqa: E501
    """read_namespaced_machine_status  # noqa: E501

    read status of the specified Machine  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.read_namespaced_machine_status_with_http_info(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Machine (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(IoXK8sClusterV1alpha4Machine, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # locals() must be captured first so it holds exactly the declared
    # arguments (self, name, namespace, kwargs); defining any local above
    # this line would leak it into the parameter dict.
    local_var_params = locals()

    # Endpoint-specific parameters ...
    all_params = [
        'name',
        'namespace',
        'pretty',
        'resource_version'
    ]
    # ... plus the client-level options every generated method accepts.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten kwargs into
    # local_var_params so every parameter is looked up uniformly below.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_namespaced_machine_status" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_machine_status`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_machine_status`")  # noqa: E501

    collection_formats = {}

    # URL templating values for {name} and {namespace}.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Optional query-string parameters (python snake_case -> API camelCase).
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None:  # noqa: E501
        query_params.append(('resourceVersion', local_var_params['resource_version']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # GET request: no request body is sent.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machines/{name}/status', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='IoXK8sClusterV1alpha4Machine',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def replace_namespaced_cluster(self, name, namespace, body, **kwargs):  # noqa: E501
    """Replace the specified Cluster.

    Synchronous by default; pass ``async_req=True`` to get a request
    thread instead:

    >>> thread = api.replace_namespaced_cluster(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Cluster (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param IoXK8sClusterV1alpha4Cluster body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: IoXK8sClusterV1alpha4Cluster
             If the method is called asynchronously,
             returns the request thread.
    """
    # Force the data-only variant: callers of this convenience method get
    # the deserialized object, never the (data, status, headers) tuple.
    call_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.replace_namespaced_cluster_with_http_info(
        name, namespace, body, **call_kwargs)  # noqa: E501
def replace_namespaced_cluster_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace_namespaced_cluster  # noqa: E501

    replace the specified Cluster  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.replace_namespaced_cluster_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Cluster (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param IoXK8sClusterV1alpha4Cluster body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(IoXK8sClusterV1alpha4Cluster, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # locals() must be captured first so it holds exactly the declared
    # arguments (self, name, namespace, body, kwargs); defining any local
    # above this line would leak it into the parameter dict.
    local_var_params = locals()

    # Endpoint-specific parameters ...
    all_params = [
        'name',
        'namespace',
        'body',
        'pretty',
        'dry_run',
        'field_manager'
    ]
    # ... plus the client-level options every generated method accepts.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then flatten kwargs into
    # local_var_params so every parameter is looked up uniformly below.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_cluster" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
                                                   local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_cluster`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                   local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_cluster`")  # noqa: E501
    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                   local_var_params['body'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_cluster`")  # noqa: E501

    collection_formats = {}

    # URL templating values for {name} and {namespace}.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Optional query-string parameters (python snake_case -> API camelCase).
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
        query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # PUT request: the replacement object is sent as the request body.
    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json', 'application/yaml'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/clusters/{name}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='IoXK8sClusterV1alpha4Cluster',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def replace_namespaced_cluster_class(self, name, namespace, body, **kwargs):  # noqa: E501
    """Replace the specified ClusterClass.

    Synchronous by default; pass ``async_req=True`` to get a request
    thread instead:

    >>> thread = api.replace_namespaced_cluster_class(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ClusterClass (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param IoXK8sClusterV1alpha4ClusterClass body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: IoXK8sClusterV1alpha4ClusterClass
             If the method is called asynchronously,
             returns the request thread.
    """
    # Force the data-only variant: callers of this convenience method get
    # the deserialized object, never the (data, status, headers) tuple.
    call_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.replace_namespaced_cluster_class_with_http_info(
        name, namespace, body, **call_kwargs)  # noqa: E501
def replace_namespaced_cluster_class_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace_namespaced_cluster_class # noqa: E501

    replace the specified ClusterClass # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.replace_namespaced_cluster_class_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the ClusterClass (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param IoXK8sClusterV1alpha4ClusterClass body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(IoXK8sClusterV1alpha4ClusterClass, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    local_var_params = locals()

    all_params = [
        'name',
        'namespace',
        'body',
        'pretty',
        'dry_run',
        'field_manager'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments.  Plain dict.items() replaces the
    # Python-2-era six.iteritems shim.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_cluster_class" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # Verify the required parameters in one loop instead of three
    # copy-pasted checks; .get() covers both "missing" and "None".
    if self.api_client.client_side_validation:
        for required in ('name', 'namespace', 'body'):
            if local_var_params.get(required) is None:
                raise ApiValueError("Missing the required parameter `%s` when calling `replace_namespaced_cluster_class`" % required)  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    query_params = []
    if local_var_params.get('pretty') is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if local_var_params.get('dry_run') is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if local_var_params.get('field_manager') is not None:  # noqa: E501
        query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json', 'application/yaml'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/clusterclasses/{name}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='IoXK8sClusterV1alpha4ClusterClass',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def replace_namespaced_cluster_status(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace_namespaced_cluster_status # noqa: E501

    replace status of the specified Cluster # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.replace_namespaced_cluster_status(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Cluster (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param IoXK8sClusterV1alpha4Cluster body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: IoXK8sClusterV1alpha4Cluster
             If the method is called asynchronously,
             returns the request thread.
    """
    # Ask the detailed variant for only the deserialized body, dropping
    # the (status code, headers) part of its return tuple.
    kwargs['_return_http_data_only'] = True
    return self.replace_namespaced_cluster_status_with_http_info(
        name, namespace, body, **kwargs)  # noqa: E501
def replace_namespaced_cluster_status_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace_namespaced_cluster_status # noqa: E501

    replace status of the specified Cluster # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.replace_namespaced_cluster_status_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Cluster (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param IoXK8sClusterV1alpha4Cluster body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(IoXK8sClusterV1alpha4Cluster, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    local_var_params = locals()

    all_params = [
        'name',
        'namespace',
        'body',
        'pretty',
        'dry_run',
        'field_manager'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments.  Plain dict.items() replaces the
    # Python-2-era six.iteritems shim.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_cluster_status" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # Verify the required parameters in one loop instead of three
    # copy-pasted checks; .get() covers both "missing" and "None".
    if self.api_client.client_side_validation:
        for required in ('name', 'namespace', 'body'):
            if local_var_params.get(required) is None:
                raise ApiValueError("Missing the required parameter `%s` when calling `replace_namespaced_cluster_status`" % required)  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    query_params = []
    if local_var_params.get('pretty') is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if local_var_params.get('dry_run') is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if local_var_params.get('field_manager') is not None:  # noqa: E501
        query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json', 'application/yaml'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/clusters/{name}/status', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='IoXK8sClusterV1alpha4Cluster',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def replace_namespaced_machine(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace_namespaced_machine # noqa: E501

    replace the specified Machine # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.replace_namespaced_machine(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Machine (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param IoXK8sClusterV1alpha4Machine body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: IoXK8sClusterV1alpha4Machine
             If the method is called asynchronously,
             returns the request thread.
    """
    # Ask the detailed variant for only the deserialized body, dropping
    # the (status code, headers) part of its return tuple.
    kwargs['_return_http_data_only'] = True
    return self.replace_namespaced_machine_with_http_info(
        name, namespace, body, **kwargs)  # noqa: E501
def replace_namespaced_machine_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace_namespaced_machine # noqa: E501

    replace the specified Machine # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.replace_namespaced_machine_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Machine (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param IoXK8sClusterV1alpha4Machine body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(IoXK8sClusterV1alpha4Machine, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    local_var_params = locals()

    all_params = [
        'name',
        'namespace',
        'body',
        'pretty',
        'dry_run',
        'field_manager'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments.  Plain dict.items() replaces the
    # Python-2-era six.iteritems shim.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_machine" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # Verify the required parameters in one loop instead of three
    # copy-pasted checks; .get() covers both "missing" and "None".
    if self.api_client.client_side_validation:
        for required in ('name', 'namespace', 'body'):
            if local_var_params.get(required) is None:
                raise ApiValueError("Missing the required parameter `%s` when calling `replace_namespaced_machine`" % required)  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    query_params = []
    if local_var_params.get('pretty') is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if local_var_params.get('dry_run') is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if local_var_params.get('field_manager') is not None:  # noqa: E501
        query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json', 'application/yaml'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machines/{name}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='IoXK8sClusterV1alpha4Machine',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def replace_namespaced_machine_deployment(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace_namespaced_machine_deployment # noqa: E501

    replace the specified MachineDeployment # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.replace_namespaced_machine_deployment(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineDeployment (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param IoXK8sClusterV1alpha4MachineDeployment body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: IoXK8sClusterV1alpha4MachineDeployment
             If the method is called asynchronously,
             returns the request thread.
    """
    # Ask the detailed variant for only the deserialized body, dropping
    # the (status code, headers) part of its return tuple.
    kwargs['_return_http_data_only'] = True
    return self.replace_namespaced_machine_deployment_with_http_info(
        name, namespace, body, **kwargs)  # noqa: E501
def replace_namespaced_machine_deployment_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace_namespaced_machine_deployment # noqa: E501

    replace the specified MachineDeployment # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.replace_namespaced_machine_deployment_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineDeployment (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param IoXK8sClusterV1alpha4MachineDeployment body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(IoXK8sClusterV1alpha4MachineDeployment, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    local_var_params = locals()

    all_params = [
        'name',
        'namespace',
        'body',
        'pretty',
        'dry_run',
        'field_manager'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments.  Plain dict.items() replaces the
    # Python-2-era six.iteritems shim.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_machine_deployment" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # Verify the required parameters in one loop instead of three
    # copy-pasted checks; .get() covers both "missing" and "None".
    if self.api_client.client_side_validation:
        for required in ('name', 'namespace', 'body'):
            if local_var_params.get(required) is None:
                raise ApiValueError("Missing the required parameter `%s` when calling `replace_namespaced_machine_deployment`" % required)  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    query_params = []
    if local_var_params.get('pretty') is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if local_var_params.get('dry_run') is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if local_var_params.get('field_manager') is not None:  # noqa: E501
        query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json', 'application/yaml'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinedeployments/{name}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='IoXK8sClusterV1alpha4MachineDeployment',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def replace_namespaced_machine_deployment_scale(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace_namespaced_machine_deployment_scale # noqa: E501

    replace scale of the specified MachineDeployment # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.replace_namespaced_machine_deployment_scale(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineDeployment (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1ScaleV2 body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: V1ScaleV2
             If the method is called asynchronously,
             returns the request thread.
    """
    # Ask the detailed variant for only the deserialized body, dropping
    # the (status code, headers) part of its return tuple.
    kwargs['_return_http_data_only'] = True
    return self.replace_namespaced_machine_deployment_scale_with_http_info(
        name, namespace, body, **kwargs)  # noqa: E501
def replace_namespaced_machine_deployment_scale_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace_namespaced_machine_deployment_scale # noqa: E501

    replace scale of the specified MachineDeployment # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.replace_namespaced_machine_deployment_scale_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineDeployment (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1ScaleV2 body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1ScaleV2, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    local_var_params = locals()

    all_params = [
        'name',
        'namespace',
        'body',
        'pretty',
        'dry_run',
        'field_manager'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments.  Plain dict.items() replaces the
    # Python-2-era six.iteritems shim.
    for key, val in local_var_params['kwargs'].items():
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_machine_deployment_scale" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # Verify the required parameters in one loop instead of three
    # copy-pasted checks; .get() covers both "missing" and "None".
    if self.api_client.client_side_validation:
        for required in ('name', 'namespace', 'body'):
            if local_var_params.get(required) is None:
                raise ApiValueError("Missing the required parameter `%s` when calling `replace_namespaced_machine_deployment_scale`" % required)  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    query_params = []
    if local_var_params.get('pretty') is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if local_var_params.get('dry_run') is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if local_var_params.get('field_manager') is not None:  # noqa: E501
        query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json', 'application/yaml'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinedeployments/{name}/scale', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1ScaleV2',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def replace_namespaced_machine_deployment_status(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace_namespaced_machine_deployment_status  # noqa: E501

    replace status of the specified MachineDeployment  # noqa: E501
    Synchronous by default; pass async_req=True to get the request
    thread instead and fetch the result with thread.get().
    >>> thread = api.replace_namespaced_machine_deployment_status(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineDeployment (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param IoXK8sClusterV1alpha4MachineDeployment body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity making these changes; at most 128 printable characters.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple.
    :return: IoXK8sClusterV1alpha4MachineDeployment
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, requesting the bare
    # deserialized object instead of the (data, status, headers) tuple.
    opts = dict(kwargs, _return_http_data_only=True)
    return self.replace_namespaced_machine_deployment_status_with_http_info(name, namespace, body, **opts)  # noqa: E501
def replace_namespaced_machine_deployment_status_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace_namespaced_machine_deployment_status  # noqa: E501

    replace status of the specified MachineDeployment  # noqa: E501
    Synchronous by default; pass async_req=True to get the request
    thread instead and fetch the result with thread.get().
    >>> thread = api.replace_namespaced_machine_deployment_status_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineDeployment (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param IoXK8sClusterV1alpha4MachineDeployment body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity making these changes; at most 128 printable characters.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple.
    :return: tuple(IoXK8sClusterV1alpha4MachineDeployment, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Positional parameters, later merged with the validated kwargs.
    local_var_params = {
        'name': name,
        'namespace': namespace,
        'body': body,
    }
    all_params = [
        'name', 'namespace', 'body',
        'pretty', 'dry_run', 'field_manager',
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ]
    # Reject anything the operation does not understand.
    for key, val in six.iteritems(kwargs):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_machine_deployment_status" % key
            )
        local_var_params[key] = val

    # Client-side check that every required parameter was supplied.
    if self.api_client.client_side_validation:
        for required in ('name', 'namespace', 'body'):
            if local_var_params.get(required) is None:
                raise ApiValueError(
                    "Missing the required parameter `%s` when calling "
                    "`replace_namespaced_machine_deployment_status`" % required)

    path_params = {
        'name': local_var_params['name'],
        'namespace': local_var_params['namespace'],
    }

    # Only emit query parameters the caller actually supplied.
    query_params = []
    for py_name, wire_name in (('pretty', 'pretty'),
                               ('dry_run', 'dryRun'),
                               ('field_manager', 'fieldManager')):
        value = local_var_params.get(py_name)
        if value is not None:
            query_params.append((wire_name, value))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json', 'application/yaml']),
    }

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinedeployments/{name}/status', 'PUT',
        path_params,
        query_params,
        header_params,
        body=local_var_params.get('body'),
        post_params=[],
        files={},
        response_type='IoXK8sClusterV1alpha4MachineDeployment',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={})
def replace_namespaced_machine_health_check(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace_namespaced_machine_health_check  # noqa: E501

    replace the specified MachineHealthCheck  # noqa: E501
    Synchronous by default; pass async_req=True to get the request
    thread instead and fetch the result with thread.get().
    >>> thread = api.replace_namespaced_machine_health_check(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineHealthCheck (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param IoXK8sClusterV1alpha4MachineHealthCheck body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity making these changes; at most 128 printable characters.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple.
    :return: IoXK8sClusterV1alpha4MachineHealthCheck
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, requesting the bare
    # deserialized object instead of the (data, status, headers) tuple.
    opts = dict(kwargs, _return_http_data_only=True)
    return self.replace_namespaced_machine_health_check_with_http_info(name, namespace, body, **opts)  # noqa: E501
def replace_namespaced_machine_health_check_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace_namespaced_machine_health_check  # noqa: E501

    replace the specified MachineHealthCheck  # noqa: E501
    Synchronous by default; pass async_req=True to get the request
    thread instead and fetch the result with thread.get().
    >>> thread = api.replace_namespaced_machine_health_check_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineHealthCheck (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param IoXK8sClusterV1alpha4MachineHealthCheck body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity making these changes; at most 128 printable characters.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple.
    :return: tuple(IoXK8sClusterV1alpha4MachineHealthCheck, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Positional parameters, later merged with the validated kwargs.
    local_var_params = {
        'name': name,
        'namespace': namespace,
        'body': body,
    }
    all_params = [
        'name', 'namespace', 'body',
        'pretty', 'dry_run', 'field_manager',
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ]
    # Reject anything the operation does not understand.
    for key, val in six.iteritems(kwargs):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_machine_health_check" % key
            )
        local_var_params[key] = val

    # Client-side check that every required parameter was supplied.
    if self.api_client.client_side_validation:
        for required in ('name', 'namespace', 'body'):
            if local_var_params.get(required) is None:
                raise ApiValueError(
                    "Missing the required parameter `%s` when calling "
                    "`replace_namespaced_machine_health_check`" % required)

    path_params = {
        'name': local_var_params['name'],
        'namespace': local_var_params['namespace'],
    }

    # Only emit query parameters the caller actually supplied.
    query_params = []
    for py_name, wire_name in (('pretty', 'pretty'),
                               ('dry_run', 'dryRun'),
                               ('field_manager', 'fieldManager')):
        value = local_var_params.get(py_name)
        if value is not None:
            query_params.append((wire_name, value))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json', 'application/yaml']),
    }

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinehealthchecks/{name}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=local_var_params.get('body'),
        post_params=[],
        files={},
        response_type='IoXK8sClusterV1alpha4MachineHealthCheck',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={})
def replace_namespaced_machine_health_check_status(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace_namespaced_machine_health_check_status  # noqa: E501

    replace status of the specified MachineHealthCheck  # noqa: E501
    Synchronous by default; pass async_req=True to get the request
    thread instead and fetch the result with thread.get().
    >>> thread = api.replace_namespaced_machine_health_check_status(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineHealthCheck (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param IoXK8sClusterV1alpha4MachineHealthCheck body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity making these changes; at most 128 printable characters.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple.
    :return: IoXK8sClusterV1alpha4MachineHealthCheck
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, requesting the bare
    # deserialized object instead of the (data, status, headers) tuple.
    opts = dict(kwargs, _return_http_data_only=True)
    return self.replace_namespaced_machine_health_check_status_with_http_info(name, namespace, body, **opts)  # noqa: E501
def replace_namespaced_machine_health_check_status_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace_namespaced_machine_health_check_status  # noqa: E501

    replace status of the specified MachineHealthCheck  # noqa: E501
    Synchronous by default; pass async_req=True to get the request
    thread instead and fetch the result with thread.get().
    >>> thread = api.replace_namespaced_machine_health_check_status_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineHealthCheck (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param IoXK8sClusterV1alpha4MachineHealthCheck body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity making these changes; at most 128 printable characters.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple.
    :return: tuple(IoXK8sClusterV1alpha4MachineHealthCheck, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Positional parameters, later merged with the validated kwargs.
    local_var_params = {
        'name': name,
        'namespace': namespace,
        'body': body,
    }
    all_params = [
        'name', 'namespace', 'body',
        'pretty', 'dry_run', 'field_manager',
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ]
    # Reject anything the operation does not understand.
    for key, val in six.iteritems(kwargs):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_machine_health_check_status" % key
            )
        local_var_params[key] = val

    # Client-side check that every required parameter was supplied.
    if self.api_client.client_side_validation:
        for required in ('name', 'namespace', 'body'):
            if local_var_params.get(required) is None:
                raise ApiValueError(
                    "Missing the required parameter `%s` when calling "
                    "`replace_namespaced_machine_health_check_status`" % required)

    path_params = {
        'name': local_var_params['name'],
        'namespace': local_var_params['namespace'],
    }

    # Only emit query parameters the caller actually supplied.
    query_params = []
    for py_name, wire_name in (('pretty', 'pretty'),
                               ('dry_run', 'dryRun'),
                               ('field_manager', 'fieldManager')):
        value = local_var_params.get(py_name)
        if value is not None:
            query_params.append((wire_name, value))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json', 'application/yaml']),
    }

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinehealthchecks/{name}/status', 'PUT',
        path_params,
        query_params,
        header_params,
        body=local_var_params.get('body'),
        post_params=[],
        files={},
        response_type='IoXK8sClusterV1alpha4MachineHealthCheck',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={})
def replace_namespaced_machine_pool(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace_namespaced_machine_pool  # noqa: E501

    replace the specified MachinePool  # noqa: E501
    Synchronous by default; pass async_req=True to get the request
    thread instead and fetch the result with thread.get().
    >>> thread = api.replace_namespaced_machine_pool(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachinePool (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param IoXK8sClusterV1alpha4MachinePool body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity making these changes; at most 128 printable characters.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple.
    :return: IoXK8sClusterV1alpha4MachinePool
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, requesting the bare
    # deserialized object instead of the (data, status, headers) tuple.
    opts = dict(kwargs, _return_http_data_only=True)
    return self.replace_namespaced_machine_pool_with_http_info(name, namespace, body, **opts)  # noqa: E501
def replace_namespaced_machine_pool_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace_namespaced_machine_pool  # noqa: E501

    replace the specified MachinePool  # noqa: E501
    Synchronous by default; pass async_req=True to get the request
    thread instead and fetch the result with thread.get().
    >>> thread = api.replace_namespaced_machine_pool_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachinePool (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param IoXK8sClusterV1alpha4MachinePool body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity making these changes; at most 128 printable characters.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple.
    :return: tuple(IoXK8sClusterV1alpha4MachinePool, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Positional parameters, later merged with the validated kwargs.
    local_var_params = {
        'name': name,
        'namespace': namespace,
        'body': body,
    }
    all_params = [
        'name', 'namespace', 'body',
        'pretty', 'dry_run', 'field_manager',
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ]
    # Reject anything the operation does not understand.
    for key, val in six.iteritems(kwargs):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_machine_pool" % key
            )
        local_var_params[key] = val

    # Client-side check that every required parameter was supplied.
    if self.api_client.client_side_validation:
        for required in ('name', 'namespace', 'body'):
            if local_var_params.get(required) is None:
                raise ApiValueError(
                    "Missing the required parameter `%s` when calling "
                    "`replace_namespaced_machine_pool`" % required)

    path_params = {
        'name': local_var_params['name'],
        'namespace': local_var_params['namespace'],
    }

    # Only emit query parameters the caller actually supplied.
    query_params = []
    for py_name, wire_name in (('pretty', 'pretty'),
                               ('dry_run', 'dryRun'),
                               ('field_manager', 'fieldManager')):
        value = local_var_params.get(py_name)
        if value is not None:
            query_params.append((wire_name, value))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json', 'application/yaml']),
    }

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinepools/{name}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=local_var_params.get('body'),
        post_params=[],
        files={},
        response_type='IoXK8sClusterV1alpha4MachinePool',  # noqa: E501
        auth_settings=['BearerToken'],
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={})
def replace_namespaced_machine_pool_scale(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace_namespaced_machine_pool_scale  # noqa: E501

    replace scale of the specified MachinePool  # noqa: E501
    Synchronous by default; pass async_req=True to get the request
    thread instead and fetch the result with thread.get().
    >>> thread = api.replace_namespaced_machine_pool_scale(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachinePool (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1ScaleV2 body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity making these changes; at most 128 printable characters.
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total timeout in seconds, or a
                             (connection, read) tuple.
    :return: V1ScaleV2
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, requesting the bare
    # deserialized object instead of the (data, status, headers) tuple.
    opts = dict(kwargs, _return_http_data_only=True)
    return self.replace_namespaced_machine_pool_scale_with_http_info(name, namespace, body, **opts)  # noqa: E501
def replace_namespaced_machine_pool_scale_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace_namespaced_machine_pool_scale  # noqa: E501

    replace scale of the specified MachinePool  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.replace_namespaced_machine_pool_scale_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachinePool (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1ScaleV2 body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1ScaleV2, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Snapshot the call arguments.  locals() must be taken before any other
    # local variable is bound so only the real parameters are captured.
    local_var_params = locals()

    # Endpoint-specific parameters plus the generic request options.
    all_params = [
        'name',
        'namespace',
        'body',
        'pretty',
        'dry_run',
        'field_manager'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then merge kwargs into the flat
    # parameter dict.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_machine_pool_scale" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
            local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_machine_pool_scale`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
            local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_machine_pool_scale`")  # noqa: E501
    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
            local_var_params['body'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_machine_pool_scale`")  # noqa: E501

    collection_formats = {}

    # Values substituted into the {name}/{namespace} URL template.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Optional query-string parameters (python snake_case -> API camelCase).
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
        query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json', 'application/yaml'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinepools/{name}/scale', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1ScaleV2',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def replace_namespaced_machine_pool_status(self, name, namespace, body, **kwargs):  # noqa: E501
    """Replace status of the specified MachinePool.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive a request
    thread whose ``get()`` yields the result instead.

    >>> thread = api.replace_namespaced_machine_pool_status(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachinePool (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param IoXK8sClusterV1alpha4MachinePool body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: IoXK8sClusterV1alpha4MachinePool
             If the method is called asynchronously,
             returns the request thread.
    """
    # Ask the lower-level call for the deserialized object only, not the
    # full (data, status, headers) tuple.
    kwargs.update(_return_http_data_only=True)
    return self.replace_namespaced_machine_pool_status_with_http_info(
        name, namespace, body, **kwargs)
def replace_namespaced_machine_pool_status_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace_namespaced_machine_pool_status  # noqa: E501

    replace status of the specified MachinePool  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.replace_namespaced_machine_pool_status_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachinePool (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param IoXK8sClusterV1alpha4MachinePool body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(IoXK8sClusterV1alpha4MachinePool, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Snapshot the call arguments.  locals() must be taken before any other
    # local variable is bound so only the real parameters are captured.
    local_var_params = locals()

    # Endpoint-specific parameters plus the generic request options.
    all_params = [
        'name',
        'namespace',
        'body',
        'pretty',
        'dry_run',
        'field_manager'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then merge kwargs into the flat
    # parameter dict.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_machine_pool_status" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
            local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_machine_pool_status`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
            local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_machine_pool_status`")  # noqa: E501
    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
            local_var_params['body'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_machine_pool_status`")  # noqa: E501

    collection_formats = {}

    # Values substituted into the {name}/{namespace} URL template.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Optional query-string parameters (python snake_case -> API camelCase).
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
        query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json', 'application/yaml'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinepools/{name}/status', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='IoXK8sClusterV1alpha4MachinePool',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def replace_namespaced_machine_set(self, name, namespace, body, **kwargs):  # noqa: E501
    """Replace the specified MachineSet.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive a request
    thread whose ``get()`` yields the result instead.

    >>> thread = api.replace_namespaced_machine_set(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param IoXK8sClusterV1alpha4MachineSet body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: IoXK8sClusterV1alpha4MachineSet
             If the method is called asynchronously,
             returns the request thread.
    """
    # Ask the lower-level call for the deserialized object only, not the
    # full (data, status, headers) tuple.
    kwargs.update(_return_http_data_only=True)
    return self.replace_namespaced_machine_set_with_http_info(
        name, namespace, body, **kwargs)
def replace_namespaced_machine_set_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace_namespaced_machine_set  # noqa: E501

    replace the specified MachineSet  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.replace_namespaced_machine_set_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param IoXK8sClusterV1alpha4MachineSet body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(IoXK8sClusterV1alpha4MachineSet, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Snapshot the call arguments.  locals() must be taken before any other
    # local variable is bound so only the real parameters are captured.
    local_var_params = locals()

    # Endpoint-specific parameters plus the generic request options.
    all_params = [
        'name',
        'namespace',
        'body',
        'pretty',
        'dry_run',
        'field_manager'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then merge kwargs into the flat
    # parameter dict.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_machine_set" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
            local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_machine_set`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
            local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_machine_set`")  # noqa: E501
    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
            local_var_params['body'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_machine_set`")  # noqa: E501

    collection_formats = {}

    # Values substituted into the {name}/{namespace} URL template.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Optional query-string parameters (python snake_case -> API camelCase).
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
        query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json', 'application/yaml'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinesets/{name}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='IoXK8sClusterV1alpha4MachineSet',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def replace_namespaced_machine_set_scale(self, name, namespace, body, **kwargs):  # noqa: E501
    """Replace scale of the specified MachineSet.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive a request
    thread whose ``get()`` yields the result instead.

    >>> thread = api.replace_namespaced_machine_set_scale(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1ScaleV2 body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: V1ScaleV2
             If the method is called asynchronously,
             returns the request thread.
    """
    # Ask the lower-level call for the deserialized object only, not the
    # full (data, status, headers) tuple.
    kwargs.update(_return_http_data_only=True)
    return self.replace_namespaced_machine_set_scale_with_http_info(
        name, namespace, body, **kwargs)
def replace_namespaced_machine_set_scale_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace_namespaced_machine_set_scale  # noqa: E501

    replace scale of the specified MachineSet  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.replace_namespaced_machine_set_scale_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1ScaleV2 body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(V1ScaleV2, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Snapshot the call arguments.  locals() must be taken before any other
    # local variable is bound so only the real parameters are captured.
    local_var_params = locals()

    # Endpoint-specific parameters plus the generic request options.
    all_params = [
        'name',
        'namespace',
        'body',
        'pretty',
        'dry_run',
        'field_manager'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then merge kwargs into the flat
    # parameter dict.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_machine_set_scale" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
            local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_machine_set_scale`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
            local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_machine_set_scale`")  # noqa: E501
    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
            local_var_params['body'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_machine_set_scale`")  # noqa: E501

    collection_formats = {}

    # Values substituted into the {name}/{namespace} URL template.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Optional query-string parameters (python snake_case -> API camelCase).
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
        query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json', 'application/yaml'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinesets/{name}/scale', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1ScaleV2',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def replace_namespaced_machine_set_status(self, name, namespace, body, **kwargs):  # noqa: E501
    """Replace status of the specified MachineSet.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive a request
    thread whose ``get()`` yields the result instead.

    >>> thread = api.replace_namespaced_machine_set_status(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param IoXK8sClusterV1alpha4MachineSet body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: IoXK8sClusterV1alpha4MachineSet
             If the method is called asynchronously,
             returns the request thread.
    """
    # Ask the lower-level call for the deserialized object only, not the
    # full (data, status, headers) tuple.
    kwargs.update(_return_http_data_only=True)
    return self.replace_namespaced_machine_set_status_with_http_info(
        name, namespace, body, **kwargs)
def replace_namespaced_machine_set_status_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace_namespaced_machine_set_status  # noqa: E501

    replace status of the specified MachineSet  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.replace_namespaced_machine_set_status_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the MachineSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param IoXK8sClusterV1alpha4MachineSet body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(IoXK8sClusterV1alpha4MachineSet, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Snapshot the call arguments.  locals() must be taken before any other
    # local variable is bound so only the real parameters are captured.
    local_var_params = locals()

    # Endpoint-specific parameters plus the generic request options.
    all_params = [
        'name',
        'namespace',
        'body',
        'pretty',
        'dry_run',
        'field_manager'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then merge kwargs into the flat
    # parameter dict.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_machine_set_status" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'name' is set
    if self.api_client.client_side_validation and ('name' not in local_var_params or  # noqa: E501
            local_var_params['name'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_machine_set_status`")  # noqa: E501
    # verify the required parameter 'namespace' is set
    if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
            local_var_params['namespace'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_machine_set_status`")  # noqa: E501
    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
            local_var_params['body'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_machine_set_status`")  # noqa: E501

    collection_formats = {}

    # Values substituted into the {name}/{namespace} URL template.
    path_params = {}
    if 'name' in local_var_params:
        path_params['name'] = local_var_params['name']  # noqa: E501
    if 'namespace' in local_var_params:
        path_params['namespace'] = local_var_params['namespace']  # noqa: E501

    # Optional query-string parameters (python snake_case -> API camelCase).
    query_params = []
    if 'pretty' in local_var_params and local_var_params['pretty'] is not None:  # noqa: E501
        query_params.append(('pretty', local_var_params['pretty']))  # noqa: E501
    if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None:  # noqa: E501
        query_params.append(('dryRun', local_var_params['dry_run']))  # noqa: E501
    if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None:  # noqa: E501
        query_params.append(('fieldManager', local_var_params['field_manager']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in local_var_params:
        body_params = local_var_params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json', 'application/yaml'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machinesets/{name}/status', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='IoXK8sClusterV1alpha4MachineSet',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def replace_namespaced_machine_status(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace_namespaced_machine_status  # noqa: E501

    Replace status of the specified Machine.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_namespaced_machine_status(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Machine (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param IoXK8sClusterV1alpha4Machine body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: IoXK8sClusterV1alpha4Machine
             If the method is called asynchronously,
             returns the request thread.
    """
    # This convenience wrapper returns only the deserialized body; callers
    # wanting (data, status, headers) use the *_with_http_info variant.
    kwargs['_return_http_data_only'] = True
    return self.replace_namespaced_machine_status_with_http_info(
        name, namespace, body, **kwargs)  # noqa: E501
def replace_namespaced_machine_status_with_http_info(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace_namespaced_machine_status  # noqa: E501

    Replace status of the specified Machine.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_namespaced_machine_status_with_http_info(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str name: name of the Machine (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param IoXK8sClusterV1alpha4Machine body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(IoXK8sClusterV1alpha4Machine, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    local_var_params = locals()

    # Endpoint-specific parameters plus the framework-level options every
    # generated endpoint accepts.
    all_params = [
        'name', 'namespace', 'body',
        'pretty', 'dry_run', 'field_manager',
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ]

    # Reject unknown keyword arguments, fold known ones into the param map.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_machine_status" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # Client-side validation of the required parameters.
    if self.api_client.client_side_validation:
        if local_var_params.get('name') is None:
            raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_machine_status`")  # noqa: E501
        if local_var_params.get('namespace') is None:
            raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_machine_status`")  # noqa: E501
        if local_var_params.get('body') is None:
            raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_machine_status`")  # noqa: E501

    collection_formats = {}

    # URL template substitutions.
    path_params = {}
    for path_key in ('name', 'namespace'):
        if path_key in local_var_params:
            path_params[path_key] = local_var_params[path_key]  # noqa: E501

    # Optional query-string parameters (python name -> wire name).
    query_params = []
    for py_name, wire_name in (('pretty', 'pretty'),
                               ('dry_run', 'dryRun'),
                               ('field_manager', 'fieldManager')):
        value = local_var_params.get(py_name)
        if value is not None:
            query_params.append((wire_name, value))  # noqa: E501

    header_params = {}
    form_params = []
    local_var_files = {}

    body_params = local_var_params.get('body')

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json', 'application/yaml'])  # noqa: E501
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json', 'application/yaml'])  # noqa: E501

    # Authentication setting
    auth_settings = ['BearerToken']  # noqa: E501

    return self.api_client.call_api(
        '/apis/cluster.x-k8s.io/v1alpha4/namespaces/{namespace}/machines/{name}/status', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='IoXK8sClusterV1alpha4Machine',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
| 69.337123
| 1,390
| 0.667285
| 107,984
| 859,919
| 5.142929
| 0.00463
| 0.038376
| 0.062972
| 0.020945
| 0.998857
| 0.998804
| 0.998745
| 0.998686
| 0.998547
| 0.998108
| 0
| 0.014299
| 0.267725
| 859,919
| 12,401
| 1,391
| 69.342714
| 0.86764
| 0.559763
| 0
| 0.838437
| 1
| 0.008772
| 0.229336
| 0.078126
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026635
| false
| 0
| 0.000797
| 0
| 0.054067
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
601f12fa40fa11c94e0fe6b169e47edff49b2244
| 5,435
|
py
|
Python
|
python/ray/tests/test_component_failures.py
|
noahshpak/ray
|
edd783bc327760a4892ab89222ee551e42df15b9
|
[
"Apache-2.0"
] | 2
|
2020-02-17T17:36:23.000Z
|
2020-08-24T19:59:18.000Z
|
python/ray/tests/test_component_failures.py
|
noahshpak/ray
|
edd783bc327760a4892ab89222ee551e42df15b9
|
[
"Apache-2.0"
] | 8
|
2020-11-13T19:02:47.000Z
|
2022-03-12T00:44:51.000Z
|
python/ray/tests/test_component_failures.py
|
noahshpak/ray
|
edd783bc327760a4892ab89222ee551e42df15b9
|
[
"Apache-2.0"
] | 1
|
2021-02-02T02:24:12.000Z
|
2021-02-02T02:24:12.000Z
|
import os
import signal
import sys
import time
import numpy as np
import pytest
import ray
from ray.test_utils import run_string_as_driver_nonblocking, SignalActor
# signal.SIGKILL does not exist on Windows, so fall back to SIGTERM there.
if sys.platform != "win32":
    SIGKILL = signal.SIGKILL
else:
    SIGKILL = signal.SIGTERM
# This test checks that when a worker dies in the middle of a get, the plasma
# store and raylet will not die.
@pytest.mark.skipif(
    os.environ.get("RAY_USE_NEW_GCS") == "on",
    reason="Not working with new GCS API.")
def test_dying_worker_get(ray_start_2_cpus):
    """A worker killed while blocked in ray.get must not take down Ray."""

    @ray.remote
    def sleep_forever(signal):
        # Tell the driver we have started, then block effectively forever.
        ray.get(signal.send.remote())
        time.sleep(10**6)

    @ray.remote
    def get_worker_pid():
        return os.getpid()

    signal = SignalActor.remote()
    x_id = sleep_forever.remote(signal)
    # Block until sleep_forever is actually executing before probing workers.
    ray.get(signal.wait.remote())
    # Get the PID of the other worker.
    worker_pid = ray.get(get_worker_pid.remote())

    @ray.remote
    def f(id_in_a_list):
        ray.get(id_in_a_list[0])

    # Have the worker wait in a get call.
    result_id = f.remote([x_id])
    time.sleep(1)
    # Make sure the task hasn't finished.
    ready_ids, _ = ray.wait([result_id], timeout=0)
    assert len(ready_ids) == 0

    # Kill the worker.
    os.kill(worker_pid, SIGKILL)
    time.sleep(0.1)

    # Make sure the sleep task hasn't finished.
    ready_ids, _ = ray.wait([x_id], timeout=0)
    assert len(ready_ids) == 0
    # Seal the object so the store attempts to notify the worker that the
    # get has been fulfilled.
    obj = np.ones(200 * 1024, dtype=np.uint8)
    ray.worker.global_worker.put_object(obj, x_id)
    time.sleep(0.1)

    # Make sure that nothing has died.
    assert ray.services.remaining_processes_alive()
# This test checks that when a driver dies in the middle of a get, the plasma
# store and raylet will not die.
@pytest.mark.skipif(
    os.environ.get("RAY_USE_NEW_GCS") == "on",
    reason="Not working with new GCS API.")
def test_dying_driver_get(ray_start_regular):
    """A second driver killed while blocked in ray.get must not take down Ray."""
    # Start the Ray processes.
    address_info = ray_start_regular

    @ray.remote
    def sleep_forever():
        time.sleep(10**6)

    x_id = sleep_forever.remote()

    # A separate driver process that blocks forever on the unfinished object.
    driver = """
import ray
ray.init("{}")
ray.get(ray.ObjectRef(ray.utils.hex_to_binary("{}")))
""".format(address_info["redis_address"], x_id.hex())

    p = run_string_as_driver_nonblocking(driver)
    # Make sure the driver is running.
    time.sleep(1)
    assert p.poll() is None

    # Kill the driver process.
    p.kill()
    p.wait()
    time.sleep(0.1)

    # Make sure the original task hasn't finished.
    ready_ids, _ = ray.wait([x_id], timeout=0)
    assert len(ready_ids) == 0
    # Seal the object so the store attempts to notify the worker that the
    # get has been fulfilled.
    obj = np.ones(200 * 1024, dtype=np.uint8)
    ray.worker.global_worker.put_object(obj, x_id)
    time.sleep(0.1)

    # Make sure that nothing has died.
    assert ray.services.remaining_processes_alive()
# This test checks that when a worker dies in the middle of a wait, the plasma
# store and raylet will not die.
@pytest.mark.skipif(
    os.environ.get("RAY_USE_NEW_GCS") == "on",
    reason="Not working with new GCS API.")
def test_dying_worker_wait(ray_start_2_cpus):
    """A worker killed while blocked in ray.wait must not take down Ray."""

    @ray.remote
    def sleep_forever():
        time.sleep(10**6)

    @ray.remote
    def get_pid():
        return os.getpid()

    x_id = sleep_forever.remote()
    # Get the PID of the worker that block_in_wait will run on (sleep a little
    # to make sure that sleep_forever has already started).
    time.sleep(0.1)
    worker_pid = ray.get(get_pid.remote())

    @ray.remote
    def block_in_wait(object_ref_in_list):
        ray.wait(object_ref_in_list)

    # Have the worker wait in a wait call.
    block_in_wait.remote([x_id])
    time.sleep(0.1)

    # Kill the worker.
    os.kill(worker_pid, SIGKILL)
    time.sleep(0.1)

    # Create the object.
    obj = np.ones(200 * 1024, dtype=np.uint8)
    ray.worker.global_worker.put_object(obj, x_id)
    time.sleep(0.1)

    # Make sure that nothing has died.
    assert ray.services.remaining_processes_alive()
# This test checks that when a driver dies in the middle of a wait, the plasma
# store and raylet will not die.
@pytest.mark.skipif(
    os.environ.get("RAY_USE_NEW_GCS") == "on",
    reason="Not working with new GCS API.")
def test_dying_driver_wait(ray_start_regular):
    """A second driver killed while blocked in ray.wait must not take down Ray."""
    # Start the Ray processes.
    address_info = ray_start_regular

    @ray.remote
    def sleep_forever():
        time.sleep(10**6)

    x_id = sleep_forever.remote()

    # A separate driver process that blocks forever waiting on the object.
    driver = """
import ray
ray.init("{}")
ray.wait([ray.ObjectRef(ray.utils.hex_to_binary("{}"))])
""".format(address_info["redis_address"], x_id.hex())

    p = run_string_as_driver_nonblocking(driver)
    # Make sure the driver is running.
    time.sleep(1)
    assert p.poll() is None

    # Kill the driver process.
    p.kill()
    p.wait()
    time.sleep(0.1)

    # Make sure the original task hasn't finished.
    ready_ids, _ = ray.wait([x_id], timeout=0)
    assert len(ready_ids) == 0
    # Seal the object so the store attempts to notify the worker that the
    # wait can return.
    obj = np.ones(200 * 1024, dtype=np.uint8)
    ray.worker.global_worker.put_object(obj, x_id)
    time.sleep(0.1)

    # Make sure that nothing has died.
    assert ray.services.remaining_processes_alive()
if __name__ == "__main__":
    # Allow running this test file directly as a script via pytest.
    import pytest
    sys.exit(pytest.main(["-v", __file__]))
| 27.449495
| 78
| 0.675989
| 867
| 5,435
| 4.058824
| 0.163783
| 0.043478
| 0.028417
| 0.031259
| 0.836317
| 0.778346
| 0.751634
| 0.748508
| 0.719523
| 0.695084
| 0
| 0.018661
| 0.211224
| 5,435
| 197
| 79
| 27.588832
| 0.802193
| 0.266789
| 0
| 0.737288
| 0
| 0
| 0.098784
| 0.027609
| 0
| 0
| 0
| 0
| 0.084746
| 1
| 0.101695
| false
| 0
| 0.09322
| 0.016949
| 0.211864
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
60589edf6483971a8568bf51b2500f1bb827a561
| 6,804
|
py
|
Python
|
2021/24_1/a.py
|
budavariam/advent_of_code
|
0903bcbb0df46371b6a340ca2be007dce6470c66
|
[
"MIT"
] | null | null | null |
2021/24_1/a.py
|
budavariam/advent_of_code
|
0903bcbb0df46371b6a340ca2be007dce6470c66
|
[
"MIT"
] | null | null | null |
2021/24_1/a.py
|
budavariam/advent_of_code
|
0903bcbb0df46371b6a340ca2be007dce6470c66
|
[
"MIT"
] | 1
|
2022-02-11T13:14:50.000Z
|
2022-02-11T13:14:50.000Z
|
import math
def monad(chknum):
    """Run the AoC 2021 day 24 MONAD program over a 14-digit serial number.

    The original body was the ALU program transliterated instruction by
    instruction — fourteen nearly identical 20-line stanzas.  Each stanza
    differs only in three constants, so the whole program collapses to a
    loop over a parameter table, with behavior (including the failure
    return values and messages) unchanged.

    :param chknum: indexable sequence of 14 digit characters.
    :return: ``[chknum, w, x, y, z, None]`` on success (final register
             values; ``z == 0`` means the serial number is valid), or
             ``[chknum, None, None, None, None, message]`` if a mod/div
             instruction would be invalid.
    """
    # Per-digit constants from the unrolled program:
    # (z divisor, offset added to x, offset added to y).
    stages = [
        (1, 10, 2),
        (1, 10, 4),
        (1, 14, 8),
        (1, 11, 7),
        (1, 14, 12),
        (26, -14, 7),
        (26, 0, 10),
        (1, 10, 14),
        (26, -10, 2),
        (1, 13, 6),
        (26, -12, 8),
        (26, -3, 11),
        (26, -11, 5),
        (26, -2, 11),
    ]
    w = x = y = z = 0
    for num, (div, dx, dy) in enumerate(stages, start=1):
        w = int(chknum[num - 1])
        x = z
        # The puzzle ALU defines `mod` only for a non-negative left operand.
        # (The original also checked `26 < 0`, which is always False.)
        if x < 0:
            return [chknum, None, None, None, None,
                    'Failed on mod instruction after num %d' % num]
        x %= 26
        # `div` by zero is undefined; never true for these constants but
        # preserved from the original program.
        if div == 0:
            return [chknum, None, None, None, None,
                    'Failed on div instruction after num %d' % num]
        # math.trunc(z / div) kept verbatim to match the original semantics.
        z = math.trunc(z / div)
        x += dx
        # x becomes 1 exactly when the digit does NOT match the expected value
        # (the original computed this as two chained equality-to-0/1 steps).
        x = 0 if x == w else 1
        # On a mismatch (x == 1): z = 26*z + w + dy; on a match z is unchanged.
        y = 25 * x + 1
        z *= y
        y = (w + dy) * x
        z += y
    return [chknum, w, x, y, z, None]
# Brute-force search downward from 99999999999999 for the largest serial
# number that MONAD accepts (final z == 0).  NOTE(review): the full search
# space is astronomically large; this is the naive part-1 approach.
maxnum = int("".join(['9']*14))
for i in range(maxnum, 1, -1):
    if i % 10000 == 0:
        # Progress indicator.
        print(i)
    chknum = str(i)
    if '0' in chknum:
        # Valid serial numbers never contain the digit 0.
        continue
    else:
        _, w, x, y, z, err = monad(chknum)
        if z == 0:
            print(f"Found z=0: {chknum}")
            break
| 22.381579
| 102
| 0.45679
| 1,266
| 6,804
| 2.454186
| 0.042654
| 0.216286
| 0.216286
| 0.153202
| 0.922755
| 0.915996
| 0.877374
| 0.847119
| 0.847119
| 0.847119
| 0
| 0.099178
| 0.392416
| 6,804
| 303
| 103
| 22.455446
| 0.652395
| 0
| 0
| 0.753378
| 0
| 0
| 0.156843
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.003378
| false
| 0
| 0.003378
| 0
| 0.010135
| 0.006757
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
60787c90ed2c09fabcfc4b3607c62613bd74246b
| 62
|
py
|
Python
|
htsohm/generator/__init__.py
|
mas828/htsohm
|
2c491073fd81baf52e4051aa809db49658311172
|
[
"MIT"
] | 2
|
2020-08-27T16:46:41.000Z
|
2022-02-23T14:32:58.000Z
|
htsohm/generator/__init__.py
|
mas828/htsohm
|
2c491073fd81baf52e4051aa809db49658311172
|
[
"MIT"
] | 22
|
2016-06-13T20:37:56.000Z
|
2019-01-16T22:15:32.000Z
|
htsohm/generator/__init__.py
|
mas828/htsohm
|
2c491073fd81baf52e4051aa809db49658311172
|
[
"MIT"
] | 1
|
2020-09-05T15:26:17.000Z
|
2020-09-05T15:26:17.000Z
|
import htsohm.generator.random
import htsohm.generator.mutate
| 20.666667
| 30
| 0.870968
| 8
| 62
| 6.75
| 0.625
| 0.444444
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.064516
| 62
| 2
| 31
| 31
| 0.931034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
607a00b8f20183d5172bcbdd04af0e26c556ba15
| 40
|
py
|
Python
|
cv_pack/__init__.py
|
OrenRenner/msu_master
|
ae49b20fe3347ad76eee8b1946ecb1596eae96fb
|
[
"MIT"
] | null | null | null |
cv_pack/__init__.py
|
OrenRenner/msu_master
|
ae49b20fe3347ad76eee8b1946ecb1596eae96fb
|
[
"MIT"
] | null | null | null |
cv_pack/__init__.py
|
OrenRenner/msu_master
|
ae49b20fe3347ad76eee8b1946ecb1596eae96fb
|
[
"MIT"
] | null | null | null |
from cv_pack.im_list import get_im_list
| 20
| 39
| 0.875
| 9
| 40
| 3.444444
| 0.777778
| 0.387097
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 40
| 1
| 40
| 40
| 0.861111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
60b2af9de09831f509a1a5e05e9ee15b1eb3477c
| 2,014
|
py
|
Python
|
conrad/schema.py
|
hasan-haider/conrad
|
3e040b73a0cbec0ecee36c397c3bdedf9e439b54
|
[
"Apache-2.0"
] | 244
|
2019-10-27T22:40:17.000Z
|
2022-01-11T13:04:50.000Z
|
conrad/schema.py
|
hasan-haider/conrad
|
3e040b73a0cbec0ecee36c397c3bdedf9e439b54
|
[
"Apache-2.0"
] | 92
|
2019-10-27T23:01:34.000Z
|
2021-08-21T15:19:37.000Z
|
conrad/schema.py
|
hasan-haider/conrad
|
3e040b73a0cbec0ecee36c397c3bdedf9e439b54
|
[
"Apache-2.0"
] | 71
|
2019-10-28T03:05:42.000Z
|
2022-02-02T13:34:32.000Z
|
# -*- coding: utf-8 -*-
"""Validation schemas for conrad event files, keyed by schema version."""

LATEST = "2"

f1 = "events.json"
v1 = {
    "name": {"type": "string", "minlength": 1, "required": True},
    "url": {"type": "string", "minlength": 1, "required": True},
    "city": {"type": "string", "minlength": 1, "required": True},
    "state": {"type": "string", "required": True, "nullable": True},
    "country": {"type": "string", "minlength": 1, "required": True},
    "cfp_open": {"type": "boolean", "required": True},
    "cfp_end_date": {"is_date": True, "type": "string", "required": True},
    "start_date": {"is_date": True, "type": "string", "required": True},
    "end_date": {"is_date": True, "type": "string", "required": True},
    "source": {"type": "string", "minlength": 1, "required": True},
    "tags": {"type": "list", "minlength": 1, "required": True},
    "kind": {"type": "string", "allowed": ["conference", "meetup"], "required": True},
    "by": {"type": "string", "allowed": ["human", "bot"], "required": True},
}

f2 = "events_v2.json"
# v2 relaxes city/country to nullable and adds a free-form location field.
v2 = {
    "name": {"type": "string", "minlength": 1, "required": True},
    "url": {"type": "string", "minlength": 1, "required": True},
    "city": {"type": "string", "required": True, "nullable": True},
    "state": {"type": "string", "required": True, "nullable": True},
    "country": {"type": "string", "required": True, "nullable": True},
    "location": {"type": "string", "required": True, "nullable": True},
    "cfp_open": {"type": "boolean", "required": True},
    "cfp_end_date": {"is_date": True, "type": "string", "required": True},
    "start_date": {"is_date": True, "type": "string", "required": True},
    "end_date": {"is_date": True, "type": "string", "required": True},
    "source": {"type": "string", "minlength": 1, "required": True},
    "tags": {"type": "list", "minlength": 1, "required": True},
    "kind": {"type": "string", "allowed": ["conference", "meetup"], "required": True},
    "by": {"type": "string", "allowed": ["human", "bot"], "required": True},
}

# Explicit version registry instead of eval(f"v{LATEST}"): same result,
# but no dynamic code execution and a clear KeyError if LATEST is stale.
_VERSIONS = {"1": v1, "2": v2}
latest = _VERSIONS[LATEST]
| 49.121951
| 86
| 0.5571
| 224
| 2,014
| 4.933036
| 0.196429
| 0.293213
| 0.179186
| 0.219005
| 0.933937
| 0.933937
| 0.834389
| 0.834389
| 0.834389
| 0.834389
| 0
| 0.010071
| 0.161867
| 2,014
| 40
| 87
| 50.35
| 0.64455
| 0.010427
| 0
| 0.628571
| 0
| 0
| 0.4666
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
60ff083def9ac67ca793cfbca2a55ddf3061bd98
| 4,487
|
py
|
Python
|
app/elastic/es_request_2tags_3aggs_json.py
|
ayushmaskey/log_analysis
|
c777f48117ec8e14845aa8d2deccc7f974ca232a
|
[
"MIT"
] | null | null | null |
app/elastic/es_request_2tags_3aggs_json.py
|
ayushmaskey/log_analysis
|
c777f48117ec8e14845aa8d2deccc7f974ca232a
|
[
"MIT"
] | null | null | null |
app/elastic/es_request_2tags_3aggs_json.py
|
ayushmaskey/log_analysis
|
c777f48117ec8e14845aa8d2deccc7f974ca232a
|
[
"MIT"
] | null | null | null |
def json_internal_to_internal( start, end):
    '''kibana 24hour json'''
    # RFC1918 private ranges: both source and destination must match one of
    # these for the flow to count as internal-to-internal.
    private_cidrs = ["192.168.0.0/16", "10.0.0.0/8", "172.16.0.0/12"]
    dest_private = {
        "bool": {
            "should": [{"term": {"destination_ip": cidr}}
                       for cidr in private_cidrs]
        }
    }
    source_private = {
        "bool": {
            "should": [{"term": {"source_ip": cidr}}
                       for cidr in private_cidrs]
        }
    }
    # Restrict to the requested epoch-millis window.
    time_window = {
        "range": {
            "@timestamp": {
                "gte": start,
                "lte": end,
                "format": "epoch_millis"
            }
        }
    }
    # Three-level drilldown: destination IP -> destination port -> server name.
    aggregations = {
        "dest_ip": {
            "terms": {
                "field": "destination_ips.keyword",
                "size": 1000000,
            },
            "aggs": {
                "dest_port": {
                    "terms": {
                        "field": "destination_port",
                    },
                    "aggs": {
                        "server_name": {
                            "terms": {
                                "field": "server_name.keyword",
                            }
                        }
                    }
                }
            }
        }
    }
    # size: 0 — only aggregation buckets are wanted, no raw hits.
    es_request_query = {
        "query": {
            "bool": {
                "must": [
                    {"match_all": {}},
                    dest_private,
                    source_private,
                    time_window,
                ],
                "filter": [],
                "should": [],
                "must_not": []
            }
        },
        "size": 0,
        "aggs": aggregations
    }
    return es_request_query
def json_internal_to_external( start, end):
    '''kibana 24hour json'''
    # RFC1918 private ranges: the source must match one, the destination
    # must match NONE (internal host talking to an external address).
    private_cidrs = ["192.168.0.0/16", "10.0.0.0/8", "172.16.0.0/12"]
    dest_not_private = {
        "bool": {
            "must_not": [{"term": {"destination_ip": cidr}}
                         for cidr in private_cidrs]
        }
    }
    source_private = {
        "bool": {
            "should": [{"term": {"source_ip": cidr}}
                       for cidr in private_cidrs]
        }
    }
    # Restrict to the requested epoch-millis window.
    time_window = {
        "range": {
            "@timestamp": {
                "gte": start,
                "lte": end,
                "format": "epoch_millis"
            }
        }
    }
    # Three-level drilldown: destination IP -> destination port -> server name.
    aggregations = {
        "dest_ip": {
            "terms": {
                "field": "destination_ips.keyword",
                "size": 200000,
            },
            "aggs": {
                "dest_port": {
                    "terms": {
                        "field": "destination_port",
                    },
                    "aggs": {
                        "server_name": {
                            "terms": {
                                "field": "server_name.keyword",
                            }
                        }
                    }
                }
            }
        }
    }
    # size: 0 — only aggregation buckets are wanted, no raw hits.
    es_request_query = {
        "query": {
            "bool": {
                "must": [
                    {"match_all": {}},
                    time_window,
                    dest_not_private,
                    source_private
                ],
                "filter": [],
                "should": [],
                "must_not": []
            }
        },
        "size": 0,
        "aggs": aggregations
    }
    return es_request_query
if __name__ == "__main__":
    # Ad-hoc smoke run: build the internal-to-internal query for the last
    # 24 hours and dump it to stdout.
    ind = "*"  # NOTE(review): ind/tag1/tag2 are unused below — presumably
    start = "now-1d"
    end = "now"
    tag1 = "*"  # leftovers from a more general query builder; confirm
    tag2 = "*"  # before deleting.
    build_json_query = json_internal_to_internal( start, end)
    print(build_json_query)
| 20.96729
| 58
| 0.254067
| 267
| 4,487
| 4.037453
| 0.2397
| 0.029685
| 0.09462
| 0.033395
| 0.871985
| 0.871985
| 0.823748
| 0.823748
| 0.823748
| 0.74026
| 0
| 0.076362
| 0.603076
| 4,487
| 214
| 59
| 20.96729
| 0.528916
| 0.034767
| 0
| 0.457895
| 0
| 0
| 0.192111
| 0.010673
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010526
| false
| 0
| 0
| 0
| 0.021053
| 0.005263
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7168c52363c19ab1576b9726f739f99a6a6afb76
| 183
|
py
|
Python
|
python-project/pythonproject/foo.py
|
hgrif/oozie-pyspark-workflow
|
5351e24800766e9836d5c022d9ad8769d9d24faf
|
[
"CNRI-Python"
] | 14
|
2017-08-11T12:53:16.000Z
|
2021-02-16T16:11:37.000Z
|
python-project/pythonproject/foo.py
|
hgrif/oozie-pyspark-workflow
|
5351e24800766e9836d5c022d9ad8769d9d24faf
|
[
"CNRI-Python"
] | null | null | null |
python-project/pythonproject/foo.py
|
hgrif/oozie-pyspark-workflow
|
5351e24800766e9836d5c022d9ad8769d9d24faf
|
[
"CNRI-Python"
] | 6
|
2017-05-23T08:00:03.000Z
|
2020-07-16T15:20:44.000Z
|
def process(hive_context, date):
    """Print a marker that foo is being processed, then echo the inputs."""
    marker = 'Processing foo'
    print(marker)
    print(hive_context, date)
def compute(hive_context, date):
    """Print a marker that foo is being computed, then echo the inputs."""
    marker = 'Computing foo'
    print(marker)
    print(hive_context, date)
| 20.333333
| 32
| 0.704918
| 24
| 183
| 5.208333
| 0.416667
| 0.352
| 0.48
| 0.32
| 0.368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.174863
| 183
| 8
| 33
| 22.875
| 0.827815
| 0
| 0
| 0.333333
| 0
| 0
| 0.147541
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.333333
| 0.666667
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
7187dccaa28ac8720995c2dd972a222b95b79a37
| 2,090
|
py
|
Python
|
src/main.py
|
andrea-allen/datalab-demo
|
3f2c3eeb542080491af750e4e163ad77238a02af
|
[
"Apache-2.0"
] | null | null | null |
src/main.py
|
andrea-allen/datalab-demo
|
3f2c3eeb542080491af750e4e163ad77238a02af
|
[
"Apache-2.0"
] | null | null | null |
src/main.py
|
andrea-allen/datalab-demo
|
3f2c3eeb542080491af750e4e163ad77238a02af
|
[
"Apache-2.0"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
from src import dataskills
if __name__ == '__main__':
    print('Main method for Data Lab Demo')

    ############ DEMO ONE #########################
    # Here we have code directly in the main method
    # NOTE(review): the two commented sections below appear to be intentional
    # teaching material (inline code vs. the same logic via dataskills
    # functions) — kept as-is rather than deleted as dead code.
    # two_plus_one = 2 + 1
    # magic_math_op = [] #Define an empty list for results
    # for i in range(6):
    # magic_math_op.append(np.log10(two_plus_one ** i))
    # magic_math_op_as_array = np.array(magic_math_op)
    # range_one_to_ten = np.arange(6)
    # plt.plot(range_one_to_ten, magic_math_op_as_array, color='red', label=f'Base {two_plus_one}')
    #
    # # Repeated code chunk
    # two_plus_three = 2 + 3
    # magic_math_op = []
    # for i in range(6):
    # magic_math_op.append(two_plus_three ** i) # Same method repeated
    # magic_math_op_as_array = np.array(magic_math_op)
    # range_one_to_ten = np.arange(6)
    # plt.plot(range_one_to_ten, magic_math_op_as_array, color='blue', label=f'Base {two_plus_three}')
    #
    # plt.title('Powers Plot')
    # plt.legend(loc='upper left')
    # plt.show()

    #Here we have code calling some functions pre-defined, in the main file, but not the main method
    # two_plus_one = 2 + 1
    # magic_math_op = []
    # for i in range(6):
    # magic_math_op.append(dataskills.magic_math_op_method(two_plus_one, i))
    # magic_math_op_as_array = np.array(magic_math_op)
    # range_one_to_ten = np.arange(6)
    # plt.plot(range_one_to_ten, magic_math_op_as_array, color='red', label=f'Base {two_plus_one}')
    #
    # two_plus_three = 2 + 3
    # magic_math_op = []
    # for i in range(6):
    # magic_math_op.append(dataskills.magic_math_op_method(two_plus_three, i))
    # magic_math_op_as_array = np.array(magic_math_op)
    # range_one_to_ten = np.arange(6)
    # plt.plot(range_one_to_ten, magic_math_op_as_array, color='blue', label=f'Base {two_plus_three}')
    #
    # plt.title('Powers Plot')
    # plt.legend(loc='upper left')
    # plt.show()

    ######### DEMO TWO ##############
    # The live demo: everything is delegated to the dataskills module.
    dataskills.covid_data_demo()
| 27.866667
| 102
| 0.646411
| 332
| 2,090
| 3.707831
| 0.228916
| 0.160845
| 0.196588
| 0.084484
| 0.74411
| 0.74411
| 0.74411
| 0.74411
| 0.74411
| 0.720552
| 0
| 0.010936
| 0.21244
| 2,090
| 74
| 103
| 28.243243
| 0.736938
| 0.747847
| 0
| 0
| 0
| 0
| 0.091133
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.166667
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 8
|
71bee2a0c9be8742c23f28767af802000e0ab728
| 55,301
|
py
|
Python
|
tests/sandbox/data/scenarios.py
|
uk-gov-mirror/nhsdigital.personal-demographics-service-api
|
e02cc57b14d9ed2b3785a8f9efb43f40d5833284
|
[
"MIT"
] | null | null | null |
tests/sandbox/data/scenarios.py
|
uk-gov-mirror/nhsdigital.personal-demographics-service-api
|
e02cc57b14d9ed2b3785a8f9efb43f40d5833284
|
[
"MIT"
] | null | null | null |
tests/sandbox/data/scenarios.py
|
uk-gov-mirror/nhsdigital.personal-demographics-service-api
|
e02cc57b14d9ed2b3785a8f9efb43f40d5833284
|
[
"MIT"
] | null | null | null |
retrieve = [
{"scenario":"Patient Exists","patient":"9000000009", "response":{"address":[{"extension":[{"extension":[{"url":"type","valueCoding":{"code":"PAF","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-AddressKeyType"}},{"url":"value","valueString":"12345678"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-AddressKey"}],"id":"456","line":["1 Trevelyan Square","Boar Lane","City Centre","Leeds","West Yorkshire"],"period":{"end":"2021-12-31","start":"2020-01-01"},"postalCode":"LS1 6AE","use":"home"},{"extension":[{"extension":[{"url":"type","valueCoding":{"code":"PAF","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-AddressKeyType"}},{"url":"value","valueString":"12345678"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-AddressKey"}],"id":"T456","line":["1 Trevelyan Square","Boar Lane","City Centre","Leeds","West Yorkshire"],"period":{"end":"2021-12-31","start":"2020-01-01"},"postalCode":"LS1 6AE","text":"Student Accommodation","use":"temp"}],"birthDate":"2010-10-22","contact":[{"id":"C123","period":{"end":"2021-12-31","start":"2020-01-01"},"relationship":[{"coding":[{"code":"C","display":"Emergency 
Contact","system":"http://terminology.hl7.org/CodeSystem/v2-0131"}]}],"telecom":[{"system":"phone","value":"01632960587"}]}],"deceasedDateTime":"2010-10-22T00:00:00+00:00","extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NominatedPharmacy","valueReference":{"identifier":{"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y12345"}}},{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-PreferredDispenserOrganization","valueReference":{"identifier":{"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y23456"}}},{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-MedicalApplianceSupplier","valueReference":{"identifier":{"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y34567"}}},{"extension":[{"url":"deathNotificationStatus","valueCodeableConcept":{"coding":[{"code":"2","display":"Formal - death notice received from Registrar of Deaths","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-DeathNotificationStatus","version":"1.0.0"}]}},{"url":"systemEffectiveDate","valueDateTime":"2010-10-22T00:00:00+00:00"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-DeathNotificationStatus"},{"extension":[{"url":"language","valueCodeableConcept":{"coding":[{"code":"fr","display":"French","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-HumanLanguage","version":"1.0.0"}]}},{"url":"interpreterRequired","valueBoolean":True}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSCommunication"},{"extension":[{"url":"PreferredWrittenCommunicationFormat","valueCodeableConcept":{"coding":[{"code":"12","display":"Braille","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-PreferredWrittenCommunicationFormat"}]}},{"url":"PreferredContactMethod","valueCodeableConcept":{"coding":[{"code":"1","display":"Letter","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-PreferredContactMethod"}]}},{"url":"PreferredContactTimes","valueString":"Not after 
7pm"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-ContactPreference"},{"url":"http://hl7.org/fhir/StructureDefinition/patient-birthPlace","valueAddress":{"city":"Manchester","country":"GBR","district":"Greater Manchester"}}],"gender":"female","generalPractitioner":[{"id":"254406A3","identifier":{"period":{"end":"2021-12-31","start":"2020-01-01"},"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y12345"},"type":"Organization"}],"id":"9000000009","identifier":[{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSNumberVerificationStatus","valueCodeableConcept":{"coding":[{"code":"01","display":"Number present and verified","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-NHSNumberVerificationStatus","version":"1.0.0"}]}}],"system":"https://fhir.nhs.uk/Id/nhs-number","value":"9000000009"}],"meta":{"security":[{"code":"U","display":"unrestricted","system":"https://www.hl7.org/fhir/valueset-security-labels.html"}],"versionId":"2"},"multipleBirthInteger":1,"name":[{"family":"Smith","given":["Jane"],"id":"123","period":{"end":"2021-12-31","start":"2020-01-01"},"prefix":["Mrs"],"suffix":["MBE"],"use":"usual"}],"resourceType":"Patient","telecom":[{"id":"789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"phone","use":"home","value":"01632960587"},{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-OtherContactSystem","valueCoding":{"code":"textphone","display":"Minicom (Textphone)","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-OtherContactSystem"}}],"id":"OC789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"other","use":"home","value":"01632960587"}]}}, # noqa: E231, E501
{"scenario":"Patient Does Not Exist","patient":"9111231130", "response":{"resourceType":"OperationOutcome","issue":[{"severity":"error","code":"not_found","details":{"coding":[{"system":"https://fhir.nhs.uk/R4/CodeSystem/Spine-ErrorOrWarningCode","version":"1","code":"RESOURCE_NOT_FOUND","display":"Resource not found"}]}}]}}, # noqa: E231, E501
{"scenario":"Sensetive Patient Exists","patient":"9000000025", "response":{"birthDate":"2010-10-22","deceasedDateTime":"2010-10-22T00:00:00+00:00","extension":[{"extension":[{"url":"deathNotificationStatus","valueCodeableConcept":{"coding":[{"code":"2","display":"Formal - death notice received from Registrar of Deaths","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-DeathNotificationStatus","version":"1.0.0"}]}},{"url":"systemEffectiveDate","valueDateTime":"2010-10-22T00:00:00+00:00"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-DeathNotificationStatus"}],"gender":"female","id":"9000000025","identifier":[{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSNumberVerificationStatus","valueCodeableConcept":{"coding":[{"code":"01","display":"Number present and verified","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-NHSNumberVerificationStatus","version":"1.0.0"}]}}],"system":"https://fhir.nhs.uk/Id/nhs-number","value":"9000000025"}],"meta":{"security":[{"code":"R","display":"restricted","system":"https://www.hl7.org/fhir/valueset-security-labels.html"}],"versionId":"2"},"multipleBirthInteger":1,"name":[{"family":"Smythe","given":["Janet"],"id":"123","period":{"end":"2021-12-31","start":"2020-01-01"},"prefix":["Mrs"],"suffix":["MBE"],"use":"usual"}],"resourceType":"Patient"}}, # noqa: E231, E501
{"scenario": "Invalid NHS number", "patient": "9000000001", "response": {"resourceType": "OperationOutcome", "issue": [{"severity": "error", "code": "value", "details": {"coding": [{"system": "https://fhir.nhs.uk/R4/CodeSystem/Spine-ErrorOrWarningCode", "version": "1", "code": "INVALID_RESOURCE_ID", "display": "Resource Id is invalid"}]}}]}}, # noqa: E231, E501
{"scenario": "Invalid X-Request-ID", "patient": "9000000001", "response": {"resourceType": "OperationOutcome", "issue": [{"severity": "error", "code": "value", "details": {"coding": [{"system": "https://fhir.nhs.uk/R4/CodeSystem/Spine-ErrorOrWarningCode", "version": "1", "code": "INVALID_VALUE", "display": "Provided value is invalid"}]}, "diagnostics": "Invalid value - '1234' in header 'X-Request-ID'"}]}} # noqa: E231, E501
]
search = [
{"scenario":"Simple Search","query_params":{"family":"Smith","gender":"female","birthdate":"eq2010-10-22"},"response":{"resourceType":"Bundle","type":"searchset","total":1,"entry":[{"fullUrl":"https://api.service.nhs.uk/personal-demographics/FHIR/R4/Patient/9000000009","search":{"score":1},"resource":{"address":[{"extension":[{"extension":[{"url":"type","valueCoding":{"code":"PAF","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-AddressKeyType"}},{"url":"value","valueString":"12345678"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-AddressKey"}],"id":"456","line":["1 Trevelyan Square","Boar Lane","City Centre","Leeds","West Yorkshire"],"period":{"end":"2021-12-31","start":"2020-01-01"},"postalCode":"LS1 6AE","use":"home"}],"birthDate":"2010-10-22","contact":[{"id":"C123","period":{"end":"2021-12-31","start":"2020-01-01"},"relationship":[{"coding":[{"code":"C","display":"Emergency Contact","system":"http://terminology.hl7.org/CodeSystem/v2-0131"}]}],"telecom":[{"system":"phone","value":"01632960587"}]}],"deceasedDateTime":"2010-10-22T00:00:00+00:00","extension":[{"extension":[{"url":"deathNotificationStatus","valueCodeableConcept":{"coding":[{"code":"2","display":"Formal - death notice received from Registrar of Deaths","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-DeathNotificationStatus","version":"1.0.0"}]}},{"url":"systemEffectiveDate","valueDateTime":"2010-10-22T00:00:00+00:00"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-DeathNotificationStatus"}],"gender":"female","generalPractitioner":[{"id":"254406A3","identifier":{"period":{"end":"2021-12-31","start":"2020-01-01"},"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y12345"},"type":"Organization"}],"id":"9000000009","identifier":[{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSNumberVerificationStatus","valueCodeableConcept":{"coding":[{"code":"01","display":"Number present and 
verified","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-NHSNumberVerificationStatus","version":"1.0.0"}]}}],"system":"https://fhir.nhs.uk/Id/nhs-number","value":"9000000009"}],"meta":{"security":[{"code":"U","display":"unrestricted","system":"https://www.hl7.org/fhir/valueset-security-labels.html"}],"versionId":"2"},"multipleBirthInteger":1,"name":[{"family":"Smith","given":["Jane"],"id":"123","period":{"end":"2021-12-31","start":"2020-01-01"},"prefix":["Mrs"],"suffix":["MBE"],"use":"usual"}],"resourceType":"Patient","telecom":[{"id":"789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"phone","use":"home","value":"01632960587"},{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-OtherContactSystem","valueCoding":{"code":"textphone","display":"Minicom (Textphone)","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-OtherContactSystem"}}],"id":"OC789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"other","use":"home","value":"01632960587"}]}}]}}, # noqa: E231, E501
{"scenario":"Wildcard Search","query_params":{"family":"Sm*","gender":"female","birthdate":"eq2010-10-22"},"response":{"resourceType":"Bundle","type":"searchset","total":2,"entry":[{"fullUrl":"https://api.service.nhs.uk/personal-demographics/FHIR/R4/Patient/9000000009","search":{"score":0.8343},"resource":{"address":[{"extension":[{"extension":[{"url":"type","valueCoding":{"code":"PAF","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-AddressKeyType"}},{"url":"value","valueString":"12345678"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-AddressKey"}],"id":"456","line":["1 Trevelyan Square","Boar Lane","City Centre","Leeds","West Yorkshire"],"period":{"end":"2021-12-31","start":"2020-01-01"},"postalCode":"LS1 6AE","use":"home"}],"birthDate":"2010-10-22","contact":[{"id":"C123","period":{"end":"2021-12-31","start":"2020-01-01"},"relationship":[{"coding":[{"code":"C","display":"Emergency Contact","system":"http://terminology.hl7.org/CodeSystem/v2-0131"}]}],"telecom":[{"system":"phone","value":"01632960587"}]}],"deceasedDateTime":"2010-10-22T00:00:00+00:00","extension":[{"extension":[{"url":"deathNotificationStatus","valueCodeableConcept":{"coding":[{"code":"2","display":"Formal - death notice received from Registrar of Deaths","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-DeathNotificationStatus","version":"1.0.0"}]}},{"url":"systemEffectiveDate","valueDateTime":"2010-10-22T00:00:00+00:00"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-DeathNotificationStatus"}],"gender":"female","generalPractitioner":[{"id":"254406A3","identifier":{"period":{"end":"2021-12-31","start":"2020-01-01"},"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y12345"},"type":"Organization"}],"id":"9000000009","identifier":[{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSNumberVerificationStatus","valueCodeableConcept":{"coding":[{"code":"01","display":"Number present and 
verified","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-NHSNumberVerificationStatus","version":"1.0.0"}]}}],"system":"https://fhir.nhs.uk/Id/nhs-number","value":"9000000009"}],"meta":{"security":[{"code":"U","display":"unrestricted","system":"https://www.hl7.org/fhir/valueset-security-labels.html"}],"versionId":"2"},"multipleBirthInteger":1,"name":[{"family":"Smith","given":["Jane"],"id":"123","period":{"end":"2021-12-31","start":"2020-01-01"},"prefix":["Mrs"],"suffix":["MBE"],"use":"usual"}],"resourceType":"Patient","telecom":[{"id":"789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"phone","use":"home","value":"01632960587"},{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-OtherContactSystem","valueCoding":{"code":"textphone","display":"Minicom (Textphone)","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-OtherContactSystem"}}],"id":"OC789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"other","use":"home","value":"01632960587"}]}},{"fullUrl":"https://api.service.nhs.uk/personal-demographics/FHIR/R4/Patient/9000000017","search":{"score":0.8343},"resource":{"address":[{"extension":[{"extension":[{"url":"type","valueCoding":{"code":"PAF","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-AddressKeyType"}},{"url":"value","valueString":"12345678"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-AddressKey"}],"id":"456","line":["1 Trevelyan Square","Boar Lane","City Centre","Leeds","West Yorkshire"],"period":{"end":"2021-12-31","start":"2020-01-01"},"postalCode":"LS1 6AE","use":"home"}],"birthDate":"2010-10-22","contact":[{"id":"C123","period":{"end":"2021-12-31","start":"2020-01-01"},"relationship":[{"coding":[{"code":"C","display":"Emergency 
Contact","system":"http://terminology.hl7.org/CodeSystem/v2-0131"}]}],"telecom":[{"system":"phone","value":"01632960587"}]}],"deceasedDateTime":"2010-10-22T00:00:00+00:00","extension":[{"extension":[{"url":"deathNotificationStatus","valueCodeableConcept":{"coding":[{"code":"2","display":"Formal - death notice received from Registrar of Deaths","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-DeathNotificationStatus","version":"1.0.0"}]}},{"url":"systemEffectiveDate","valueDateTime":"2010-10-22T00:00:00+00:00"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-DeathNotificationStatus"}],"gender":"female","generalPractitioner":[{"id":"254406A3","identifier":{"period":{"end":"2021-12-31","start":"2020-01-01"},"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y12345"},"type":"Organization"}],"id":"9000000017","identifier":[{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSNumberVerificationStatus","valueCodeableConcept":{"coding":[{"code":"01","display":"Number present and verified","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-NHSNumberVerificationStatus","version":"1.0.0"}]}}],"system":"https://fhir.nhs.uk/Id/nhs-number","value":"9000000017"}],"meta":{"security":[{"code":"U","display":"unrestricted","system":"https://www.hl7.org/fhir/valueset-security-labels.html"}],"versionId":"2"},"multipleBirthInteger":1,"name":[{"family":"Smyth","given":["Jayne"],"id":"123","period":{"end":"2021-12-31","start":"2020-01-01"},"prefix":["Mrs"],"suffix":["MBE"],"use":"usual"}],"resourceType":"Patient","telecom":[{"id":"789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"phone","use":"home","value":"01632960587"},{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-OtherContactSystem","valueCoding":{"code":"textphone","display":"Minicom 
(Textphone)","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-OtherContactSystem"}}],"id":"OC789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"other","use":"home","value":"01632960587"}]}}]}}, # noqa: E231, E501
{"scenario":"Limited results Search","query_params":{"family":"Sm*","gender":"female","birthdate":"eq2010-10-22","_max-results":"2"},"response":{"resourceType":"Bundle","type":"searchset","total":2,"entry":[{"fullUrl":"https://api.service.nhs.uk/personal-demographics/FHIR/R4/Patient/9000000009","search":{"score":0.8343},"resource":{"address":[{"extension":[{"extension":[{"url":"type","valueCoding":{"code":"PAF","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-AddressKeyType"}},{"url":"value","valueString":"12345678"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-AddressKey"}],"id":"456","line":["1 Trevelyan Square","Boar Lane","City Centre","Leeds","West Yorkshire"],"period":{"end":"2021-12-31","start":"2020-01-01"},"postalCode":"LS1 6AE","use":"home"}],"birthDate":"2010-10-22","contact":[{"id":"C123","period":{"end":"2021-12-31","start":"2020-01-01"},"relationship":[{"coding":[{"code":"C","display":"Emergency Contact","system":"http://terminology.hl7.org/CodeSystem/v2-0131"}]}],"telecom":[{"system":"phone","value":"01632960587"}]}],"deceasedDateTime":"2010-10-22T00:00:00+00:00","extension":[{"extension":[{"url":"deathNotificationStatus","valueCodeableConcept":{"coding":[{"code":"2","display":"Formal - death notice received from Registrar of Deaths","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-DeathNotificationStatus","version":"1.0.0"}]}},{"url":"systemEffectiveDate","valueDateTime":"2010-10-22T00:00:00+00:00"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-DeathNotificationStatus"}],"gender":"female","generalPractitioner":[{"id":"254406A3","identifier":{"period":{"end":"2021-12-31","start":"2020-01-01"},"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y12345"},"type":"Organization"}],"id":"9000000009","identifier":[{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSNumberVerificationStatus","valueCodeableConcept":{"coding":[{"code":"01","display":"Number present 
and verified","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-NHSNumberVerificationStatus","version":"1.0.0"}]}}],"system":"https://fhir.nhs.uk/Id/nhs-number","value":"9000000009"}],"meta":{"security":[{"code":"U","display":"unrestricted","system":"https://www.hl7.org/fhir/valueset-security-labels.html"}],"versionId":"2"},"multipleBirthInteger":1,"name":[{"family":"Smith","given":["Jane"],"id":"123","period":{"end":"2021-12-31","start":"2020-01-01"},"prefix":["Mrs"],"suffix":["MBE"],"use":"usual"}],"resourceType":"Patient","telecom":[{"id":"789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"phone","use":"home","value":"01632960587"},{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-OtherContactSystem","valueCoding":{"code":"textphone","display":"Minicom (Textphone)","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-OtherContactSystem"}}],"id":"OC789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"other","use":"home","value":"01632960587"}]}},{"fullUrl":"https://api.service.nhs.uk/personal-demographics/FHIR/R4/Patient/9000000017","search":{"score":0.8343},"resource":{"address":[{"extension":[{"extension":[{"url":"type","valueCoding":{"code":"PAF","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-AddressKeyType"}},{"url":"value","valueString":"12345678"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-AddressKey"}],"id":"456","line":["1 Trevelyan Square","Boar Lane","City Centre","Leeds","West Yorkshire"],"period":{"end":"2021-12-31","start":"2020-01-01"},"postalCode":"LS1 6AE","use":"home"}],"birthDate":"2010-10-22","contact":[{"id":"C123","period":{"end":"2021-12-31","start":"2020-01-01"},"relationship":[{"coding":[{"code":"C","display":"Emergency 
Contact","system":"http://terminology.hl7.org/CodeSystem/v2-0131"}]}],"telecom":[{"system":"phone","value":"01632960587"}]}],"deceasedDateTime":"2010-10-22T00:00:00+00:00","extension":[{"extension":[{"url":"deathNotificationStatus","valueCodeableConcept":{"coding":[{"code":"2","display":"Formal - death notice received from Registrar of Deaths","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-DeathNotificationStatus","version":"1.0.0"}]}},{"url":"systemEffectiveDate","valueDateTime":"2010-10-22T00:00:00+00:00"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-DeathNotificationStatus"}],"gender":"female","generalPractitioner":[{"id":"254406A3","identifier":{"period":{"end":"2021-12-31","start":"2020-01-01"},"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y12345"},"type":"Organization"}],"id":"9000000017","identifier":[{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSNumberVerificationStatus","valueCodeableConcept":{"coding":[{"code":"01","display":"Number present and verified","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-NHSNumberVerificationStatus","version":"1.0.0"}]}}],"system":"https://fhir.nhs.uk/Id/nhs-number","value":"9000000017"}],"meta":{"security":[{"code":"U","display":"unrestricted","system":"https://www.hl7.org/fhir/valueset-security-labels.html"}],"versionId":"2"},"multipleBirthInteger":1,"name":[{"family":"Smyth","given":["Jayne"],"id":"123","period":{"end":"2021-12-31","start":"2020-01-01"},"prefix":["Mrs"],"suffix":["MBE"],"use":"usual"}],"resourceType":"Patient","telecom":[{"id":"789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"phone","use":"home","value":"01632960587"},{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-OtherContactSystem","valueCoding":{"code":"textphone","display":"Minicom 
(Textphone)","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-OtherContactSystem"}}],"id":"OC789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"other","use":"home","value":"01632960587"}]}}]}}, # noqa: E231, E501
{"scenario":"Date Range Search","response":{"resourceType":"Bundle","type":"searchset","total":1,"entry":[{"fullUrl":"https://api.service.nhs.uk/personal-demographics/FHIR/R4/Patient/9000000009","search":{"score":1},"resource":{"address":[{"extension":[{"extension":[{"url":"type","valueCoding":{"code":"PAF","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-AddressKeyType"}},{"url":"value","valueString":"12345678"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-AddressKey"}],"id":"456","line":["1 Trevelyan Square","Boar Lane","City Centre","Leeds","West Yorkshire"],"period":{"end":"2021-12-31","start":"2020-01-01"},"postalCode":"LS1 6AE","use":"home"}],"birthDate":"2010-10-22","contact":[{"id":"C123","period":{"end":"2021-12-31","start":"2020-01-01"},"relationship":[{"coding":[{"code":"C","display":"Emergency Contact","system":"http://terminology.hl7.org/CodeSystem/v2-0131"}]}],"telecom":[{"system":"phone","value":"01632960587"}]}],"deceasedDateTime":"2010-10-22T00:00:00+00:00","extension":[{"extension":[{"url":"deathNotificationStatus","valueCodeableConcept":{"coding":[{"code":"2","display":"Formal - death notice received from Registrar of Deaths","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-DeathNotificationStatus","version":"1.0.0"}]}},{"url":"systemEffectiveDate","valueDateTime":"2010-10-22T00:00:00+00:00"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-DeathNotificationStatus"}],"gender":"female","generalPractitioner":[{"id":"254406A3","identifier":{"period":{"end":"2021-12-31","start":"2020-01-01"},"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y12345"},"type":"Organization"}],"id":"9000000009","identifier":[{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSNumberVerificationStatus","valueCodeableConcept":{"coding":[{"code":"01","display":"Number present and 
verified","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-NHSNumberVerificationStatus","version":"1.0.0"}]}}],"system":"https://fhir.nhs.uk/Id/nhs-number","value":"9000000009"}],"meta":{"security":[{"code":"U","display":"unrestricted","system":"https://www.hl7.org/fhir/valueset-security-labels.html"}],"versionId":"2"},"multipleBirthInteger":1,"name":[{"family":"Smith","given":["Jane"],"id":"123","period":{"end":"2021-12-31","start":"2020-01-01"},"prefix":["Mrs"],"suffix":["MBE"],"use":"usual"}],"resourceType":"Patient","telecom":[{"id":"789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"phone","use":"home","value":"01632960587"},{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-OtherContactSystem","valueCoding":{"code":"textphone","display":"Minicom (Textphone)","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-OtherContactSystem"}}],"id":"OC789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"other","use":"home","value":"01632960587"}]}}]}}, # noqa: E231, E501
{"scenario":"Fuzzy Search","query_params":{"family":"Smith","given":"jane","gender":"female","birthdate":"2010-10-22","_fuzzy-match":True},"response":{"resourceType":"Bundle","type":"searchset","total":1,"entry":[{"fullUrl":"https://api.service.nhs.uk/personal-demographics/FHIR/R4/Patient/9000000017","search":{"score":0.8976},"resource":{"address":[{"extension":[{"extension":[{"url":"type","valueCoding":{"code":"PAF","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-AddressKeyType"}},{"url":"value","valueString":"12345678"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-AddressKey"}],"id":"456","line":["1 Trevelyan Square","Boar Lane","City Centre","Leeds","West Yorkshire"],"period":{"end":"2021-12-31","start":"2020-01-01"},"postalCode":"LS1 6AE","use":"home"}],"birthDate":"2010-10-22","contact":[{"id":"C123","period":{"end":"2021-12-31","start":"2020-01-01"},"relationship":[{"coding":[{"code":"C","display":"Emergency Contact","system":"http://terminology.hl7.org/CodeSystem/v2-0131"}]}],"telecom":[{"system":"phone","value":"01632960587"}]}],"deceasedDateTime":"2010-10-22T00:00:00+00:00","extension":[{"extension":[{"url":"deathNotificationStatus","valueCodeableConcept":{"coding":[{"code":"2","display":"Formal - death notice received from Registrar of Deaths","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-DeathNotificationStatus","version":"1.0.0"}]}},{"url":"systemEffectiveDate","valueDateTime":"2010-10-22T00:00:00+00:00"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-DeathNotificationStatus"}],"gender":"female","generalPractitioner":[{"id":"254406A3","identifier":{"period":{"end":"2021-12-31","start":"2020-01-01"},"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y12345"},"type":"Organization"}],"id":"9000000017","identifier":[{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSNumberVerificationStatus","valueCodeableConcept":{"coding":[{"code":"01","display":"Number 
present and verified","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-NHSNumberVerificationStatus","version":"1.0.0"}]}}],"system":"https://fhir.nhs.uk/Id/nhs-number","value":"9000000017"}],"meta":{"security":[{"code":"U","display":"unrestricted","system":"https://www.hl7.org/fhir/valueset-security-labels.html"}],"versionId":"2"},"multipleBirthInteger":1,"name":[{"family":"Smyth","given":["Jayne"],"id":"123","period":{"end":"2021-12-31","start":"2020-01-01"},"prefix":["Mrs"],"suffix":["MBE"],"use":"usual"}],"resourceType":"Patient","telecom":[{"id":"789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"phone","use":"home","value":"01632960587"},{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-OtherContactSystem","valueCoding":{"code":"textphone","display":"Minicom (Textphone)","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-OtherContactSystem"}}],"id":"OC789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"other","use":"home","value":"01632960587"}]}}]}}, # noqa: E231, E501
{"scenario": "Restricted Patient Search","query_params": {"family": "Smythe", "given": "janet", "gender": "female", "birthdate": "eq2005-06-16"}, "response": {"resourceType": "Bundle", "type": "searchset", "total": 1, "entry": [{"fullUrl": "https://api.service.nhs.uk/personal-demographics/FHIR/R4/Patient/9000000025", "search": {"score": 1}, "resource": {"birthDate": "2005-06-16", "deceasedDateTime": "2005-06-16T00:00:00+00:00", "extension": [{"extension": [{"url": "deathNotificationStatus", "valueCodeableConcept": {"coding": [{"code": "2", "display": "Formal - death notice received from Registrar of Deaths", "system": "https://fhir.nhs.uk/R4/CodeSystem/UKCore-DeathNotificationStatus", "version": "1.0.0"}]}}, {"url": "systemEffectiveDate", "valueDateTime": "2005-06-16T00:00:00+00:00"}], "url": "https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-DeathNotificationStatus"}], "gender": "female", "id": "9000000025","identifier": [{"extension": [{"url": "https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSNumberVerificationStatus", "valueCodeableConcept": {"coding": [{"code": "01", "display": "Number present and verified", "system": "https://fhir.nhs.uk/R4/CodeSystem/UKCore-NHSNumberVerificationStatus", "version": "1.0.0"}]}}], "system": "https://fhir.nhs.uk/Id/nhs-number", "value": "9000000025"}], "meta": {"security": [{"code": "R", "display": "restricted", "system": "https://www.hl7.org/fhir/valueset-security-labels.html"}], "versionId": "2"}, "multipleBirthInteger": 1, "name": [{"family": "Smythe", "given": ["Janet"], "id": "123", "period": {"end": "2021-12-31", "start": "2020-01-01"}, "prefix": ["Mrs"], "suffix": ["MBE"], "use": "usual"}], "resourceType": "Patient"}}]}}, # noqa: E231, E501
{"scenario":"Unsuccessful Search","query_params":{"family":"Bingham","given":"john","gender":"male","birthdate":"1934-12-18"},"response":{"resourceType":"Bundle","type":"searchset","total":0}}, # noqa: E231, E501
{"scenario": "Invalid Date Format Search","query_params": {"family": "Smith", "given": "jane", "gender": "female", "birthdate": "20101022"}, "response": {"resourceType": "OperationOutcome", "issue": [{"severity": "error", "code": "value", "details": {"coding": [{"system": "https://fhir.nhs.uk/R4/CodeSystem/Spine-ErrorOrWarningCode", "version": "1", "code": "INVALID_SEARCH_DATA", "display": "Search data is invalid"}]}, "diagnostics": "Invalid value - '20101022' in field 'birthdate'"}]}}, # noqa: E231, E501
{"scenario":"Too Few Search Parameters","response":{"resourceType":"OperationOutcome","issue":[{"severity":"error","code":"required","details":{"coding":[{"system":"https://fhir.nhs.uk/R4/CodeSystem/Spine-ErrorOrWarningCode","version":"1","code":"MISSING_VALUE","display":"Required value is missing"}]},"diagnostics":"Not enough search parameters were provided to be able to make a search"}]}}, # noqa: E231, E501
]
update = [
{"scenario":"Add New Name", "patient":"9000000009","patient_record":2,"patch":{"patches": [{"op": "add", "path": "/name/-", "value": {"use": "usual", "period": {"start": "2019-12-31"}, "prefix": "Dr", "given": ["Joe", "Horation", "Maximus"], "family": "Bloggs", "suffix": "PhD"}}]},"response":{"address":[{"extension":[{"extension":[{"url":"type","valueCoding":{"code":"PAF","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-AddressKeyType"}},{"url":"value","valueString":"12345678"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-AddressKey"}],"id":"456","line":["1 Trevelyan Square","Boar Lane","City Centre","Leeds","West Yorkshire"],"period":{"end":"2021-12-31","start":"2020-01-01"},"postalCode":"LS1 6AE","use":"home"},{"extension":[{"extension":[{"url":"type","valueCoding":{"code":"PAF","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-AddressKeyType"}},{"url":"value","valueString":"12345678"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-AddressKey"}],"id":"T456","line":["1 Trevelyan Square","Boar Lane","City Centre","Leeds","West Yorkshire"],"period":{"end":"2021-12-31","start":"2020-01-01"},"postalCode":"LS1 6AE","text":"Student Accommodation","use":"temp"}],"birthDate":"2010-10-22","contact":[{"id":"C123","period":{"end":"2021-12-31","start":"2020-01-01"},"relationship":[{"coding":[{"code":"C","display":"Emergency 
Contact","system":"http://terminology.hl7.org/CodeSystem/v2-0131"}]}],"telecom":[{"system":"phone","value":"01632960587"}]}],"deceasedDateTime":"2010-10-22T00:00:00+00:00","extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NominatedPharmacy","valueReference":{"identifier":{"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y12345"}}},{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-PreferredDispenserOrganization","valueReference":{"identifier":{"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y23456"}}},{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-MedicalApplianceSupplier","valueReference":{"identifier":{"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y34567"}}},{"extension":[{"url":"deathNotificationStatus","valueCodeableConcept":{"coding":[{"code":"2","display":"Formal - death notice received from Registrar of Deaths","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-DeathNotificationStatus","version":"1.0.0"}]}},{"url":"systemEffectiveDate","valueDateTime":"2010-10-22T00:00:00+00:00"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-DeathNotificationStatus"},{"extension":[{"url":"language","valueCodeableConcept":{"coding":[{"code":"fr","display":"French","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-HumanLanguage","version":"1.0.0"}]}},{"url":"interpreterRequired","valueBoolean":True}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSCommunication"},{"extension":[{"url":"PreferredWrittenCommunicationFormat","valueCodeableConcept":{"coding":[{"code":"12","display":"Braille","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-PreferredWrittenCommunicationFormat"}]}},{"url":"PreferredContactMethod","valueCodeableConcept":{"coding":[{"code":"1","display":"Letter","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-PreferredContactMethod"}]}},{"url":"PreferredContactTimes","valueString":"Not after 
7pm"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-ContactPreference"},{"url":"http://hl7.org/fhir/StructureDefinition/patient-birthPlace","valueAddress":{"city":"Manchester","country":"GBR","district":"Greater Manchester"}}],"gender":"female","generalPractitioner":[{"id":"254406A3","identifier":{"period":{"end":"2021-12-31","start":"2020-01-01"},"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y12345"},"type":"Organization"}],"id":"9000000009","identifier":[{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSNumberVerificationStatus","valueCodeableConcept":{"coding":[{"code":"01","display":"Number present and verified","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-NHSNumberVerificationStatus","version":"1.0.0"}]}}],"system":"https://fhir.nhs.uk/Id/nhs-number","value":"9000000009"}],"meta":{"security":[{"code":"U","display":"unrestricted","system":"https://www.hl7.org/fhir/valueset-security-labels.html"}],"versionId":3},"multipleBirthInteger":1,"name":[{"family":"Smith","given":["Jane"],"id":"123","period":{"end":"2021-12-31","start":"2020-01-01"},"prefix":["Mrs"],"suffix":["MBE"],"use":"usual"},{"use":"usual","period":{"start":"2019-12-31"},"prefix":"Dr","given":["Joe","Horation","Maximus"],"family":"Bloggs","suffix":"PhD"}],"resourceType":"Patient","telecom":[{"id":"789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"phone","use":"home","value":"01632960587"},{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-OtherContactSystem","valueCoding":{"code":"textphone","display":"Minicom (Textphone)","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-OtherContactSystem"}}],"id":"OC789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"other","use":"home","value":"01632960587"}]}}, # noqa: E231, E501
{"scenario":"Replace Given Name", "patient":"9000000009","patient_record":2,"patch":{"patches":[{"op":"replace","path":"/name/0/given/0","value":"Anne"}]},"response":{"address":[{"extension":[{"extension":[{"url":"type","valueCoding":{"code":"PAF","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-AddressKeyType"}},{"url":"value","valueString":"12345678"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-AddressKey"}],"id":"456","line":["1 Trevelyan Square","Boar Lane","City Centre","Leeds","West Yorkshire"],"period":{"end":"2021-12-31","start":"2020-01-01"},"postalCode":"LS1 6AE","use":"home"},{"extension":[{"extension":[{"url":"type","valueCoding":{"code":"PAF","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-AddressKeyType"}},{"url":"value","valueString":"12345678"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-AddressKey"}],"id":"T456","line":["1 Trevelyan Square","Boar Lane","City Centre","Leeds","West Yorkshire"],"period":{"end":"2021-12-31","start":"2020-01-01"},"postalCode":"LS1 6AE","text":"Student Accommodation","use":"temp"}],"birthDate":"2010-10-22","contact":[{"id":"C123","period":{"end":"2021-12-31","start":"2020-01-01"},"relationship":[{"coding":[{"code":"C","display":"Emergency 
Contact","system":"http://terminology.hl7.org/CodeSystem/v2-0131"}]}],"telecom":[{"system":"phone","value":"01632960587"}]}],"deceasedDateTime":"2010-10-22T00:00:00+00:00","extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NominatedPharmacy","valueReference":{"identifier":{"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y12345"}}},{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-PreferredDispenserOrganization","valueReference":{"identifier":{"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y23456"}}},{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-MedicalApplianceSupplier","valueReference":{"identifier":{"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y34567"}}},{"extension":[{"url":"deathNotificationStatus","valueCodeableConcept":{"coding":[{"code":"2","display":"Formal - death notice received from Registrar of Deaths","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-DeathNotificationStatus","version":"1.0.0"}]}},{"url":"systemEffectiveDate","valueDateTime":"2010-10-22T00:00:00+00:00"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-DeathNotificationStatus"},{"extension":[{"url":"language","valueCodeableConcept":{"coding":[{"code":"fr","display":"French","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-HumanLanguage","version":"1.0.0"}]}},{"url":"interpreterRequired","valueBoolean":True}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSCommunication"},{"extension":[{"url":"PreferredWrittenCommunicationFormat","valueCodeableConcept":{"coding":[{"code":"12","display":"Braille","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-PreferredWrittenCommunicationFormat"}]}},{"url":"PreferredContactMethod","valueCodeableConcept":{"coding":[{"code":"1","display":"Letter","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-PreferredContactMethod"}]}},{"url":"PreferredContactTimes","valueString":"Not after 
7pm"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-ContactPreference"},{"url":"http://hl7.org/fhir/StructureDefinition/patient-birthPlace","valueAddress":{"city":"Manchester","country":"GBR","district":"Greater Manchester"}}],"gender":"female","generalPractitioner":[{"id":"254406A3","identifier":{"period":{"end":"2021-12-31","start":"2020-01-01"},"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y12345"},"type":"Organization"}],"id":"9000000009","identifier":[{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSNumberVerificationStatus","valueCodeableConcept":{"coding":[{"code":"01","display":"Number present and verified","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-NHSNumberVerificationStatus","version":"1.0.0"}]}}],"system":"https://fhir.nhs.uk/Id/nhs-number","value":"9000000009"}],"meta":{"security":[{"code":"U","display":"unrestricted","system":"https://www.hl7.org/fhir/valueset-security-labels.html"}],"versionId":3},"multipleBirthInteger":1,"name":[{"family":"Smith","given":["Anne"],"id":"123","period":{"end":"2021-12-31","start":"2020-01-01"},"prefix":["Mrs"],"suffix":["MBE"],"use":"usual"}],"resourceType":"Patient","telecom":[{"id":"789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"phone","use":"home","value":"01632960587"},{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-OtherContactSystem","valueCoding":{"code":"textphone","display":"Minicom (Textphone)","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-OtherContactSystem"}}],"id":"OC789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"other","use":"home","value":"01632960587"}]}}, # noqa: E231, E501
{"scenario":"Remove Suffix from Name", "patient":"9000000009","patient_record":2,"patch":{"patches":[{"op":"test","path":"/name/0/id","value":"123"},{"op":"remove","path":"/name/0/suffix/0"}]},"response":{"address":[{"extension":[{"extension":[{"url":"type","valueCoding":{"code":"PAF","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-AddressKeyType"}},{"url":"value","valueString":"12345678"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-AddressKey"}],"id":"456","line":["1 Trevelyan Square","Boar Lane","City Centre","Leeds","West Yorkshire"],"period":{"end":"2021-12-31","start":"2020-01-01"},"postalCode":"LS1 6AE","use":"home"},{"extension":[{"extension":[{"url":"type","valueCoding":{"code":"PAF","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-AddressKeyType"}},{"url":"value","valueString":"12345678"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-AddressKey"}],"id":"T456","line":["1 Trevelyan Square","Boar Lane","City Centre","Leeds","West Yorkshire"],"period":{"end":"2021-12-31","start":"2020-01-01"},"postalCode":"LS1 6AE","text":"Student Accommodation","use":"temp"}],"birthDate":"2010-10-22","contact":[{"id":"C123","period":{"end":"2021-12-31","start":"2020-01-01"},"relationship":[{"coding":[{"code":"C","display":"Emergency 
Contact","system":"http://terminology.hl7.org/CodeSystem/v2-0131"}]}],"telecom":[{"system":"phone","value":"01632960587"}]}],"deceasedDateTime":"2010-10-22T00:00:00+00:00","extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NominatedPharmacy","valueReference":{"identifier":{"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y12345"}}},{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-PreferredDispenserOrganization","valueReference":{"identifier":{"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y23456"}}},{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-MedicalApplianceSupplier","valueReference":{"identifier":{"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y34567"}}},{"extension":[{"url":"deathNotificationStatus","valueCodeableConcept":{"coding":[{"code":"2","display":"Formal - death notice received from Registrar of Deaths","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-DeathNotificationStatus","version":"1.0.0"}]}},{"url":"systemEffectiveDate","valueDateTime":"2010-10-22T00:00:00+00:00"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-DeathNotificationStatus"},{"extension":[{"url":"language","valueCodeableConcept":{"coding":[{"code":"fr","display":"French","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-HumanLanguage","version":"1.0.0"}]}},{"url":"interpreterRequired","valueBoolean":True}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSCommunication"},{"extension":[{"url":"PreferredWrittenCommunicationFormat","valueCodeableConcept":{"coding":[{"code":"12","display":"Braille","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-PreferredWrittenCommunicationFormat"}]}},{"url":"PreferredContactMethod","valueCodeableConcept":{"coding":[{"code":"1","display":"Letter","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-PreferredContactMethod"}]}},{"url":"PreferredContactTimes","valueString":"Not after 
7pm"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-ContactPreference"},{"url":"http://hl7.org/fhir/StructureDefinition/patient-birthPlace","valueAddress":{"city":"Manchester","country":"GBR","district":"Greater Manchester"}}],"gender":"female","generalPractitioner":[{"id":"254406A3","identifier":{"period":{"end":"2021-12-31","start":"2020-01-01"},"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y12345"},"type":"Organization"}],"id":"9000000009","identifier":[{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSNumberVerificationStatus","valueCodeableConcept":{"coding":[{"code":"01","display":"Number present and verified","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-NHSNumberVerificationStatus","version":"1.0.0"}]}}],"system":"https://fhir.nhs.uk/Id/nhs-number","value":"9000000009"}],"meta":{"security":[{"code":"U","display":"unrestricted","system":"https://www.hl7.org/fhir/valueset-security-labels.html"}],"versionId":3},"multipleBirthInteger":1,"name":[{"family":"Smith","given":["Jane"],"id":"123","period":{"end":"2021-12-31","start":"2020-01-01"},"prefix":["Mrs"],"suffix":[],"use":"usual"}],"resourceType":"Patient","telecom":[{"id":"789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"phone","use":"home","value":"01632960587"},{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-OtherContactSystem","valueCoding":{"code":"textphone","display":"Minicom (Textphone)","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-OtherContactSystem"}}],"id":"OC789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"other","use":"home","value":"01632960587"}]}}, # noqa: E231, E501
{"scenario":"No Patch Sent", "patient":"9000000009","patient_record":2,"patch":{},"response":{"resourceType":"OperationOutcome","issue":[{"severity":"error","code":"structure","details":{"coding":[{"system":"https://fhir.nhs.uk/R4/CodeSystem/Spine-ErrorOrWarningCode","version":"1","code":"INVALID_UPDATE","display":"Update is invalid"}]},"diagnostics":"Invalid update with error - No patches found"}]}}, # noqa: E231, E501
{"scenario":"Incorrect resource version", "patient":"9000000009","patient_record":3,"patch":{"patches":[{"op":"test","path":"/name/0/id","value":"123"},{"op":"remove","path":"/name/0/suffix/0"}]},"response":{"resourceType":"OperationOutcome","issue":[{"severity":"error","code":"structure","details":{"coding":[{"system":"https://fhir.nhs.uk/R4/CodeSystem/Spine-ErrorOrWarningCode","version":"1","code":"PRECONDITION_FAILED","display":"Required condition was not fulfilled"}]},"diagnostics":"Invalid update with error - This resource has changed since you last read. Please re-read and try again with the new version number."}]}}, # noqa: E231, E501
{"scenario":"Invalid Request ID", "patient":"9000000009","patient_record":2,"patch":{"patches":[{"op":"test","path":"/name/0/id","value":"123"},{"op":"remove","path":"/name/0/suffix/0"}]},"response":{"resourceType":"OperationOutcome","issue":[{"severity":"error","code":"value","details":{"coding":[{"system":"https://fhir.nhs.uk/R4/CodeSystem/Spine-ErrorOrWarningCode","version":"1","code":"INVALID_VALUE","display":"Provided value is invalid"}]},"diagnostics":"Invalid value - '12345' in header 'X-Request-ID'"}]}}, # noqa: E231, E501
{"scenario":"Missing If Match Header", "patient":"9000000009","patch":{"patches":[{"op":"test","path":"/name/0/id","value":"123"},{"op":"remove","path":"/name/0/suffix/0"}]},"response":{"resourceType":"OperationOutcome","issue":[{"severity":"error","code":"structure","details":{"coding":[{"system":"https://fhir.nhs.uk/R4/CodeSystem/Spine-ErrorOrWarningCode","version":"1","code":"PRECONDITION_FAILED","display":"Required condition was not fulfilled"}]},"diagnostics":"Invalid update with error - If-Match header must be supplied to update this resource"}]}}, # noqa: E231, E501
{"scenario":"Incorrect Content Type", "patient":"9000000009","patch":{"patches":[{"op":"test","path":"/name/0/id","value":"123"},{"op":"remove","path":"/name/0/suffix/0"}]},"response":{"resourceType":"OperationOutcome","issue":[{"severity":"error","code":"processing","details":{"coding":[{"system":"https://fhir.nhs.uk/R4/CodeSystem/Spine-ErrorOrWarningCode","version":"1","code":"UNSUPPORTED_SERVICE","display":"Unsupported Service"}]}}]}}, # noqa: E231, E501
{"scenario":"Invalid patch", "patient":"9000000009","patient_record":2, "patch":{"patches":[{"op":"bad_value","path":"not a path"}]},"response":{"resourceType":"OperationOutcome","issue":[{"severity":"error","code":"structure","details":{"coding":[{"system":"https://fhir.nhs.uk/R4/CodeSystem/Spine-ErrorOrWarningCode","version":"1","code":"INVALID_UPDATE","display":"Update is invalid"}]},"diagnostics":"Invalid patch: Operation `op` property is not one of operations defined in RFC-6902"}]}}, # noqa: E231, E501
{"scenario":"Invalid NHS Number", "patient":"9000000000","patient_record":2,"patch":{"patches":[{"op":"test","path":"/name/0/id","value":"123"},{"op":"remove","path":"/name/0/suffix/0"}]},"response":{"resourceType":"OperationOutcome","issue":[{"severity":"error","code":"value","details":{"coding":[{"system":"https://fhir.nhs.uk/R4/CodeSystem/Spine-ErrorOrWarningCode","version":"1","code":"INVALID_RESOURCE_ID","display":"Resource Id is invalid"}]}}]}}, # noqa: E231, E501
{"scenario":"Patient does not Exist", "patient":"9111231130","patient_record":2,"patch":{"patches":[{"op":"test","path":"/name/0/id","value":"123"},{"op":"remove","path":"/name/0/suffix/0"}]},"response":{"resourceType":"OperationOutcome","issue":[{"severity":"error","code":"not_found","details":{"coding":[{"system":"https://fhir.nhs.uk/R4/CodeSystem/Spine-ErrorOrWarningCode","version":"1","code":"RESOURCE_NOT_FOUND","display":"Resource not found"}]}}]}} # noqa: E231, E501
]
relatedPerson = [
{"scenario":"Related Person Exists","patient":"9000000009", "response":{"resourceType":"Bundle","type":"searchset","total":2,"entry":[{"fullUrl":"https://api.service.nhs.uk/personal-demographics/FHIR/R4/Patient/9000000009/RelatedPerson/507B7621","resource":{"active":True,"address":[{"extension":[{"extension":[{"url":"type","valueCoding":{"code":"PAF","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-AddressKeyType"}},{"url":"value","valueString":"12345678"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-AddressKey"}],"line":["1 Trevelyan Square","Boar Lane","City Centre","Leeds","West Yorkshire"],"period":{"end":"2021-12-31","start":"2020-01-01"},"postalCode":"LS1 6AE","use":"home"}],"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-CopyCorrespondenceIndicator","valueBoolean":True},{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-ContactRank","valuePositiveInt":1},{"extension":[{"url":"PreferredWrittenCommunicationFormat","valueCodeableConcept":{"coding":[{"code":"12","display":"Braille","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-PreferredWrittenCommunicationFormat"}]}},{"url":"PreferredContactMethod","valueCodeableConcept":{"coding":[{"code":"1","display":"Letter","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-PreferredContactMethod"}]}},{"url":"PreferredContactTimes","valueString":"Not after 
7pm"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-ContactPreference"},{"extension":[{"url":"language","valueCodeableConcept":{"coding":[{"code":"fr","display":"French","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-HumanLanguage","version":"1.0.0"}]}},{"url":"interpreterRequired","valueBoolean":True}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSCommunication"}],"id":"507B7621","name":[{"family":"Smith","given":["Jane"],"period":{"end":"2021-12-31","start":"2020-01-01"},"prefix":["Mrs"],"suffix":["MBE"],"use":"usual"}],"patient":{"identifier":{"system":"https://api.service.nhs.uk/personal-demographics/FHIR/R4/Patient","value":"90000000009"},"reference":"https://api.service.nhs.uk/personal-demographics/FHIR/R4/Patient/90000000009","type":"Patient"},"period":{"end":"2021-12-31","start":"2020-01-01"},"relationship":[{"coding":[{"code":"Guardian","display":"Guardian of patient","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-AdditionalRelatedPersonRole"}]}],"resourceType":"RelatedPerson","telecom":[{"period":{"end":"2021-12-31","start":"2020-01-01"},"system":"phone","use":"home","value":"01632960587"}]}},{"fullUrl":"https://api.service.nhs.uk/personal-demographics/FHIR/R4/Patient/9000000009/RelatedPerson/B3380E98","resource":{"active":True,"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-CopyCorrespondenceIndicator","valueBoolean":True},{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-ContactRank","valuePositiveInt":1},{"extension":[{"url":"PreferredWrittenCommunicationFormat","valueCodeableConcept":{"coding":[{"code":"12","display":"Braille","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-PreferredWrittenCommunicationFormat"}]}},{"url":"PreferredContactMethod","valueCodeableConcept":{"coding":[{"code":"1","display":"Letter","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-PreferredContactMethod"}]}},{"url":"PreferredContactTimes","valueString":"Not after 
7pm"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-ContactPreference"},{"extension":[{"url":"language","valueCodeableConcept":{"coding":[{"code":"fr","display":"French","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-HumanLanguage","version":"1.0.0"}]}},{"url":"interpreterRequired","valueBoolean":True}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSCommunication"}],"id":"B3380E98","patient":{"identifier":{"system":"https://api.service.nhs.uk/personal-demographics/FHIR/R4/Patient","value":"90000000009"},"reference":"https://api.service.nhs.uk/personal-demographics/FHIR/R4/Patient/90000000009","type":"Patient"},"period":{"end":"2021-12-31","start":"2020-01-01"},"relationship":[{"coding":[{"code":"Guardian","display":"Guardian of patient","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-AdditionalRelatedPersonRole"}]}],"resourceType":"RelatedPerson"}}]}}, # noqa: E231, E501
{"scenario":"Patient Does Not Exist","patient":"9111231130","response":{"resourceType":"OperationOutcome","issue":[{"severity":"error","code":"not_found","details":{"coding":[{"system":"https://fhir.nhs.uk/R4/CodeSystem/Spine-ErrorOrWarningCode","version":"1","code":"RESOURCE_NOT_FOUND","display":"Resource not found"}]}}]}}, # noqa: E231, E501
{"scenario": "Related Person Does Not Exist", "patient": "9000000025", "response": {"resourceType":"Bundle","type":"searchset","total":0}} # noqa: E231, E501
]
| 1,316.690476
| 5,859
| 0.694978
| 6,595
| 55,301
| 5.821228
| 0.04928
| 0.028392
| 0.063765
| 0.074392
| 0.964497
| 0.958298
| 0.95736
| 0.955276
| 0.951004
| 0.948347
| 0
| 0.076712
| 0.017739
| 55,301
| 41
| 5,860
| 1,348.804878
| 0.630044
| 0.008589
| 0
| 0.055556
| 0
| 0.361111
| 0.756301
| 0.025859
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
71e29344f6fb37e0de62ca30ec04a12dd913d537
| 30,476
|
py
|
Python
|
sdk/python/pulumi_onelogin/_inputs.py
|
AaronFriel/pulumi-onelogin
|
4528112d16d32fe9f233c67e0070fe6507c5d2e0
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_onelogin/_inputs.py
|
AaronFriel/pulumi-onelogin
|
4528112d16d32fe9f233c67e0070fe6507c5d2e0
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_onelogin/_inputs.py
|
AaronFriel/pulumi-onelogin
|
4528112d16d32fe9f233c67e0070fe6507c5d2e0
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
# Public API of this generated module: one `*Args` input class for each
# nested property shape used by the OneLogin provider's resources.
__all__ = [
    'AppParameterArgs',
    'AppRuleActionArgs',
    'AppRuleConditionArgs',
    'AuthServerConfigurationArgs',
    'OidcAppParameterArgs',
    'PrivilegePrivilegeArgs',
    'PrivilegePrivilegeStatementArgs',
    'SamlAppParameterArgs',
    'SmartHookConditionArgs',
    'SmartHookOptionsArgs',
    'UserMappingActionArgs',
    'UserMappingConditionArgs',
]
@pulumi.input_type
class AppParameterArgs:
    """Input arguments for a single custom parameter on a OneLogin app.

    Generated by the Pulumi Terraform Bridge (tfgen); do not hand-edit the
    accessor boilerplate. Only ``param_key_name`` is required. Every optional
    field is stored via ``pulumi.set`` *only* when the caller supplied a
    non-None value, so unset fields stay absent from the serialized input
    rather than being sent as explicit nulls.
    """
    def __init__(__self__, *,
                 param_key_name: pulumi.Input[str],
                 attributes_transformations: Optional[pulumi.Input[str]] = None,
                 default_values: Optional[pulumi.Input[str]] = None,
                 include_in_saml_assertion: Optional[pulumi.Input[bool]] = None,
                 label: Optional[pulumi.Input[str]] = None,
                 param_id: Optional[pulumi.Input[int]] = None,
                 provisioned_entitlements: Optional[pulumi.Input[bool]] = None,
                 safe_entitlements_enabled: Optional[pulumi.Input[bool]] = None,
                 skip_if_blank: Optional[pulumi.Input[bool]] = None,
                 user_attribute_macros: Optional[pulumi.Input[str]] = None,
                 user_attribute_mappings: Optional[pulumi.Input[str]] = None,
                 values: Optional[pulumi.Input[str]] = None):
        """Record the provided parameter fields on this input object.

        :param param_key_name: Required key that identifies the parameter.
        All remaining keyword-only arguments are optional and skipped when
        left as ``None`` (see the per-field guards below).
        """
        # Required field: always recorded.
        pulumi.set(__self__, "param_key_name", param_key_name)
        # Optional fields: each guard keeps a None (i.e. "not provided")
        # value out of the underlying pulumi property bag.
        if attributes_transformations is not None:
            pulumi.set(__self__, "attributes_transformations", attributes_transformations)
        if default_values is not None:
            pulumi.set(__self__, "default_values", default_values)
        if include_in_saml_assertion is not None:
            pulumi.set(__self__, "include_in_saml_assertion", include_in_saml_assertion)
        if label is not None:
            pulumi.set(__self__, "label", label)
        if param_id is not None:
            pulumi.set(__self__, "param_id", param_id)
        if provisioned_entitlements is not None:
            pulumi.set(__self__, "provisioned_entitlements", provisioned_entitlements)
        if safe_entitlements_enabled is not None:
            pulumi.set(__self__, "safe_entitlements_enabled", safe_entitlements_enabled)
        if skip_if_blank is not None:
            pulumi.set(__self__, "skip_if_blank", skip_if_blank)
        if user_attribute_macros is not None:
            pulumi.set(__self__, "user_attribute_macros", user_attribute_macros)
        if user_attribute_mappings is not None:
            pulumi.set(__self__, "user_attribute_mappings", user_attribute_mappings)
        if values is not None:
            pulumi.set(__self__, "values", values)

    # Generated accessors: each property pair proxies pulumi.get/pulumi.set,
    # with @pulumi.getter(name=...) mapping the snake_case Python attribute
    # to its camelCase wire name.
    @property
    @pulumi.getter(name="paramKeyName")
    def param_key_name(self) -> pulumi.Input[str]:
        return pulumi.get(self, "param_key_name")

    @param_key_name.setter
    def param_key_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "param_key_name", value)

    @property
    @pulumi.getter(name="attributesTransformations")
    def attributes_transformations(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "attributes_transformations")

    @attributes_transformations.setter
    def attributes_transformations(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "attributes_transformations", value)

    @property
    @pulumi.getter(name="defaultValues")
    def default_values(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "default_values")

    @default_values.setter
    def default_values(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "default_values", value)

    @property
    @pulumi.getter(name="includeInSamlAssertion")
    def include_in_saml_assertion(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "include_in_saml_assertion")

    @include_in_saml_assertion.setter
    def include_in_saml_assertion(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "include_in_saml_assertion", value)

    @property
    @pulumi.getter
    def label(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "label")

    @label.setter
    def label(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "label", value)

    @property
    @pulumi.getter(name="paramId")
    def param_id(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "param_id")

    @param_id.setter
    def param_id(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "param_id", value)

    @property
    @pulumi.getter(name="provisionedEntitlements")
    def provisioned_entitlements(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "provisioned_entitlements")

    @provisioned_entitlements.setter
    def provisioned_entitlements(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "provisioned_entitlements", value)

    @property
    @pulumi.getter(name="safeEntitlementsEnabled")
    def safe_entitlements_enabled(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "safe_entitlements_enabled")

    @safe_entitlements_enabled.setter
    def safe_entitlements_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "safe_entitlements_enabled", value)

    @property
    @pulumi.getter(name="skipIfBlank")
    def skip_if_blank(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "skip_if_blank")

    @skip_if_blank.setter
    def skip_if_blank(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "skip_if_blank", value)

    @property
    @pulumi.getter(name="userAttributeMacros")
    def user_attribute_macros(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "user_attribute_macros")

    @user_attribute_macros.setter
    def user_attribute_macros(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_attribute_macros", value)

    @property
    @pulumi.getter(name="userAttributeMappings")
    def user_attribute_mappings(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "user_attribute_mappings")

    @user_attribute_mappings.setter
    def user_attribute_mappings(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_attribute_mappings", value)

    @property
    @pulumi.getter
    def values(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "values")

    @values.setter
    def values(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "values", value)
@pulumi.input_type
class AppRuleConditionArgs:
    """Input type for one app-rule condition: ``source`` compared to ``value`` via ``operator``."""

    def __init__(__self__, *,
                 operator: pulumi.Input[str],
                 source: pulumi.Input[str],
                 value: pulumi.Input[str]):
        """All three inputs are required and registered unconditionally."""
        for _name, _arg in (("operator", operator), ("source", source), ("value", value)):
            pulumi.set(__self__, _name, _arg)

    @property
    @pulumi.getter
    def operator(self) -> pulumi.Input[str]:
        """The ``operator`` input."""
        return pulumi.get(self, "operator")

    @operator.setter
    def operator(self, value: pulumi.Input[str]):
        pulumi.set(self, "operator", value)

    @property
    @pulumi.getter
    def source(self) -> pulumi.Input[str]:
        """The ``source`` input."""
        return pulumi.get(self, "source")

    @source.setter
    def source(self, value: pulumi.Input[str]):
        pulumi.set(self, "source", value)

    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        """The ``value`` input."""
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class AuthServerConfigurationArgs:
    """Input type for an auth-server configuration: audiences, resource identifier, optional token lifetimes."""

    def __init__(__self__, *,
                 audiences: pulumi.Input[Sequence[pulumi.Input[str]]],
                 resource_identifier: pulumi.Input[str],
                 access_token_expiration_minutes: Optional[pulumi.Input[int]] = None,
                 refresh_token_expiration_minutes: Optional[pulumi.Input[int]] = None):
        """Register required inputs unconditionally; the two expiry inputs only when supplied."""
        pulumi.set(__self__, "audiences", audiences)
        pulumi.set(__self__, "resource_identifier", resource_identifier)
        for _name, _arg in (
                ("access_token_expiration_minutes", access_token_expiration_minutes),
                ("refresh_token_expiration_minutes", refresh_token_expiration_minutes)):
            if _arg is not None:
                pulumi.set(__self__, _name, _arg)

    @property
    @pulumi.getter
    def audiences(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """The ``audiences`` input (list of strings)."""
        return pulumi.get(self, "audiences")

    @audiences.setter
    def audiences(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "audiences", value)

    @property
    @pulumi.getter(name="resourceIdentifier")
    def resource_identifier(self) -> pulumi.Input[str]:
        """The ``resource_identifier`` input (wire name ``resourceIdentifier``)."""
        return pulumi.get(self, "resource_identifier")

    @resource_identifier.setter
    def resource_identifier(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_identifier", value)

    @property
    @pulumi.getter(name="accessTokenExpirationMinutes")
    def access_token_expiration_minutes(self) -> Optional[pulumi.Input[int]]:
        """The optional ``access_token_expiration_minutes`` input (wire name ``accessTokenExpirationMinutes``)."""
        return pulumi.get(self, "access_token_expiration_minutes")

    @access_token_expiration_minutes.setter
    def access_token_expiration_minutes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "access_token_expiration_minutes", value)

    @property
    @pulumi.getter(name="refreshTokenExpirationMinutes")
    def refresh_token_expiration_minutes(self) -> Optional[pulumi.Input[int]]:
        """The optional ``refresh_token_expiration_minutes`` input (wire name ``refreshTokenExpirationMinutes``)."""
        return pulumi.get(self, "refresh_token_expiration_minutes")

    @refresh_token_expiration_minutes.setter
    def refresh_token_expiration_minutes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "refresh_token_expiration_minutes", value)
@pulumi.input_type
class OidcAppParameterArgs:
    """Input type for one OIDC app parameter: a required key name plus optional mapping/entitlement settings."""

    def __init__(__self__, *,
                 param_key_name: pulumi.Input[str],
                 attributes_transformations: Optional[pulumi.Input[str]] = None,
                 default_values: Optional[pulumi.Input[str]] = None,
                 include_in_saml_assertion: Optional[pulumi.Input[bool]] = None,
                 label: Optional[pulumi.Input[str]] = None,
                 param_id: Optional[pulumi.Input[int]] = None,
                 provisioned_entitlements: Optional[pulumi.Input[bool]] = None,
                 safe_entitlements_enabled: Optional[pulumi.Input[bool]] = None,
                 skip_if_blank: Optional[pulumi.Input[bool]] = None,
                 user_attribute_macros: Optional[pulumi.Input[str]] = None,
                 user_attribute_mappings: Optional[pulumi.Input[str]] = None,
                 values: Optional[pulumi.Input[str]] = None):
        """``param_key_name`` is required; every other input is registered only when supplied."""
        pulumi.set(__self__, "param_key_name", param_key_name)
        _optionals = {
            "attributes_transformations": attributes_transformations,
            "default_values": default_values,
            "include_in_saml_assertion": include_in_saml_assertion,
            "label": label,
            "param_id": param_id,
            "provisioned_entitlements": provisioned_entitlements,
            "safe_entitlements_enabled": safe_entitlements_enabled,
            "skip_if_blank": skip_if_blank,
            "user_attribute_macros": user_attribute_macros,
            "user_attribute_mappings": user_attribute_mappings,
            "values": values,
        }
        for _name, _arg in _optionals.items():
            if _arg is not None:
                pulumi.set(__self__, _name, _arg)

    @property
    @pulumi.getter(name="paramKeyName")
    def param_key_name(self) -> pulumi.Input[str]:
        """The required ``param_key_name`` input (wire name ``paramKeyName``)."""
        return pulumi.get(self, "param_key_name")

    @param_key_name.setter
    def param_key_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "param_key_name", value)

    @property
    @pulumi.getter(name="attributesTransformations")
    def attributes_transformations(self) -> Optional[pulumi.Input[str]]:
        """The optional ``attributes_transformations`` input (wire name ``attributesTransformations``)."""
        return pulumi.get(self, "attributes_transformations")

    @attributes_transformations.setter
    def attributes_transformations(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "attributes_transformations", value)

    @property
    @pulumi.getter(name="defaultValues")
    def default_values(self) -> Optional[pulumi.Input[str]]:
        """The optional ``default_values`` input (wire name ``defaultValues``)."""
        return pulumi.get(self, "default_values")

    @default_values.setter
    def default_values(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "default_values", value)

    @property
    @pulumi.getter(name="includeInSamlAssertion")
    def include_in_saml_assertion(self) -> Optional[pulumi.Input[bool]]:
        """The optional ``include_in_saml_assertion`` input (wire name ``includeInSamlAssertion``)."""
        return pulumi.get(self, "include_in_saml_assertion")

    @include_in_saml_assertion.setter
    def include_in_saml_assertion(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "include_in_saml_assertion", value)

    @property
    @pulumi.getter
    def label(self) -> Optional[pulumi.Input[str]]:
        """The optional ``label`` input."""
        return pulumi.get(self, "label")

    @label.setter
    def label(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "label", value)

    @property
    @pulumi.getter(name="paramId")
    def param_id(self) -> Optional[pulumi.Input[int]]:
        """The optional ``param_id`` input (wire name ``paramId``)."""
        return pulumi.get(self, "param_id")

    @param_id.setter
    def param_id(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "param_id", value)

    @property
    @pulumi.getter(name="provisionedEntitlements")
    def provisioned_entitlements(self) -> Optional[pulumi.Input[bool]]:
        """The optional ``provisioned_entitlements`` input (wire name ``provisionedEntitlements``)."""
        return pulumi.get(self, "provisioned_entitlements")

    @provisioned_entitlements.setter
    def provisioned_entitlements(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "provisioned_entitlements", value)

    @property
    @pulumi.getter(name="safeEntitlementsEnabled")
    def safe_entitlements_enabled(self) -> Optional[pulumi.Input[bool]]:
        """The optional ``safe_entitlements_enabled`` input (wire name ``safeEntitlementsEnabled``)."""
        return pulumi.get(self, "safe_entitlements_enabled")

    @safe_entitlements_enabled.setter
    def safe_entitlements_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "safe_entitlements_enabled", value)

    @property
    @pulumi.getter(name="skipIfBlank")
    def skip_if_blank(self) -> Optional[pulumi.Input[bool]]:
        """The optional ``skip_if_blank`` input (wire name ``skipIfBlank``)."""
        return pulumi.get(self, "skip_if_blank")

    @skip_if_blank.setter
    def skip_if_blank(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "skip_if_blank", value)

    @property
    @pulumi.getter(name="userAttributeMacros")
    def user_attribute_macros(self) -> Optional[pulumi.Input[str]]:
        """The optional ``user_attribute_macros`` input (wire name ``userAttributeMacros``)."""
        return pulumi.get(self, "user_attribute_macros")

    @user_attribute_macros.setter
    def user_attribute_macros(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_attribute_macros", value)

    @property
    @pulumi.getter(name="userAttributeMappings")
    def user_attribute_mappings(self) -> Optional[pulumi.Input[str]]:
        """The optional ``user_attribute_mappings`` input (wire name ``userAttributeMappings``)."""
        return pulumi.get(self, "user_attribute_mappings")

    @user_attribute_mappings.setter
    def user_attribute_mappings(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_attribute_mappings", value)

    @property
    @pulumi.getter
    def values(self) -> Optional[pulumi.Input[str]]:
        """The optional ``values`` input."""
        return pulumi.get(self, "values")

    @values.setter
    def values(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "values", value)
@pulumi.input_type
class PrivilegePrivilegeArgs:
    """Input type for a privilege document: a required statement list and an optional version string."""

    def __init__(__self__, *,
                 statements: pulumi.Input[Sequence[pulumi.Input['PrivilegePrivilegeStatementArgs']]],
                 version: Optional[pulumi.Input[str]] = None):
        """Register ``statements`` unconditionally; ``version`` only when supplied."""
        pulumi.set(__self__, "statements", statements)
        # "version" is optional and is skipped entirely when the caller omits it.
        if version is not None:
            pulumi.set(__self__, "version", version)

    @property
    @pulumi.getter
    def statements(self) -> pulumi.Input[Sequence[pulumi.Input['PrivilegePrivilegeStatementArgs']]]:
        """The ``statements`` input (list of statement args)."""
        return pulumi.get(self, "statements")

    @statements.setter
    def statements(self, value: pulumi.Input[Sequence[pulumi.Input['PrivilegePrivilegeStatementArgs']]]):
        pulumi.set(self, "statements", value)

    @property
    @pulumi.getter
    def version(self) -> Optional[pulumi.Input[str]]:
        """The optional ``version`` input."""
        return pulumi.get(self, "version")

    @version.setter
    def version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "version", value)
@pulumi.input_type
class PrivilegePrivilegeStatementArgs:
    """Input type for one privilege statement: ``actions``, an ``effect``, and ``scopes``."""

    def __init__(__self__, *,
                 actions: pulumi.Input[Sequence[pulumi.Input[str]]],
                 effect: pulumi.Input[str],
                 scopes: pulumi.Input[Sequence[pulumi.Input[str]]]):
        """All three inputs are required and registered unconditionally."""
        for _name, _arg in (("actions", actions), ("effect", effect), ("scopes", scopes)):
            pulumi.set(__self__, _name, _arg)

    @property
    @pulumi.getter
    def actions(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """The ``actions`` input (list of strings)."""
        return pulumi.get(self, "actions")

    @actions.setter
    def actions(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "actions", value)

    @property
    @pulumi.getter
    def effect(self) -> pulumi.Input[str]:
        """The ``effect`` input."""
        return pulumi.get(self, "effect")

    @effect.setter
    def effect(self, value: pulumi.Input[str]):
        pulumi.set(self, "effect", value)

    @property
    @pulumi.getter
    def scopes(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """The ``scopes`` input (list of strings)."""
        return pulumi.get(self, "scopes")

    @scopes.setter
    def scopes(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "scopes", value)
@pulumi.input_type
class SamlAppParameterArgs:
    """Input type for one SAML app parameter: a required key name plus optional mapping/entitlement settings."""

    def __init__(__self__, *,
                 param_key_name: pulumi.Input[str],
                 attributes_transformations: Optional[pulumi.Input[str]] = None,
                 default_values: Optional[pulumi.Input[str]] = None,
                 include_in_saml_assertion: Optional[pulumi.Input[bool]] = None,
                 label: Optional[pulumi.Input[str]] = None,
                 param_id: Optional[pulumi.Input[int]] = None,
                 provisioned_entitlements: Optional[pulumi.Input[bool]] = None,
                 safe_entitlements_enabled: Optional[pulumi.Input[bool]] = None,
                 skip_if_blank: Optional[pulumi.Input[bool]] = None,
                 user_attribute_macros: Optional[pulumi.Input[str]] = None,
                 user_attribute_mappings: Optional[pulumi.Input[str]] = None,
                 values: Optional[pulumi.Input[str]] = None):
        """``param_key_name`` is required; every other input is registered only when supplied."""
        pulumi.set(__self__, "param_key_name", param_key_name)
        for _name, _arg in (
                ("attributes_transformations", attributes_transformations),
                ("default_values", default_values),
                ("include_in_saml_assertion", include_in_saml_assertion),
                ("label", label),
                ("param_id", param_id),
                ("provisioned_entitlements", provisioned_entitlements),
                ("safe_entitlements_enabled", safe_entitlements_enabled),
                ("skip_if_blank", skip_if_blank),
                ("user_attribute_macros", user_attribute_macros),
                ("user_attribute_mappings", user_attribute_mappings),
                ("values", values)):
            if _arg is not None:
                pulumi.set(__self__, _name, _arg)

    @property
    @pulumi.getter(name="paramKeyName")
    def param_key_name(self) -> pulumi.Input[str]:
        """The required ``param_key_name`` input (wire name ``paramKeyName``)."""
        return pulumi.get(self, "param_key_name")

    @param_key_name.setter
    def param_key_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "param_key_name", value)

    @property
    @pulumi.getter(name="attributesTransformations")
    def attributes_transformations(self) -> Optional[pulumi.Input[str]]:
        """The optional ``attributes_transformations`` input (wire name ``attributesTransformations``)."""
        return pulumi.get(self, "attributes_transformations")

    @attributes_transformations.setter
    def attributes_transformations(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "attributes_transformations", value)

    @property
    @pulumi.getter(name="defaultValues")
    def default_values(self) -> Optional[pulumi.Input[str]]:
        """The optional ``default_values`` input (wire name ``defaultValues``)."""
        return pulumi.get(self, "default_values")

    @default_values.setter
    def default_values(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "default_values", value)

    @property
    @pulumi.getter(name="includeInSamlAssertion")
    def include_in_saml_assertion(self) -> Optional[pulumi.Input[bool]]:
        """The optional ``include_in_saml_assertion`` input (wire name ``includeInSamlAssertion``)."""
        return pulumi.get(self, "include_in_saml_assertion")

    @include_in_saml_assertion.setter
    def include_in_saml_assertion(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "include_in_saml_assertion", value)

    @property
    @pulumi.getter
    def label(self) -> Optional[pulumi.Input[str]]:
        """The optional ``label`` input."""
        return pulumi.get(self, "label")

    @label.setter
    def label(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "label", value)

    @property
    @pulumi.getter(name="paramId")
    def param_id(self) -> Optional[pulumi.Input[int]]:
        """The optional ``param_id`` input (wire name ``paramId``)."""
        return pulumi.get(self, "param_id")

    @param_id.setter
    def param_id(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "param_id", value)

    @property
    @pulumi.getter(name="provisionedEntitlements")
    def provisioned_entitlements(self) -> Optional[pulumi.Input[bool]]:
        """The optional ``provisioned_entitlements`` input (wire name ``provisionedEntitlements``)."""
        return pulumi.get(self, "provisioned_entitlements")

    @provisioned_entitlements.setter
    def provisioned_entitlements(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "provisioned_entitlements", value)

    @property
    @pulumi.getter(name="safeEntitlementsEnabled")
    def safe_entitlements_enabled(self) -> Optional[pulumi.Input[bool]]:
        """The optional ``safe_entitlements_enabled`` input (wire name ``safeEntitlementsEnabled``)."""
        return pulumi.get(self, "safe_entitlements_enabled")

    @safe_entitlements_enabled.setter
    def safe_entitlements_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "safe_entitlements_enabled", value)

    @property
    @pulumi.getter(name="skipIfBlank")
    def skip_if_blank(self) -> Optional[pulumi.Input[bool]]:
        """The optional ``skip_if_blank`` input (wire name ``skipIfBlank``)."""
        return pulumi.get(self, "skip_if_blank")

    @skip_if_blank.setter
    def skip_if_blank(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "skip_if_blank", value)

    @property
    @pulumi.getter(name="userAttributeMacros")
    def user_attribute_macros(self) -> Optional[pulumi.Input[str]]:
        """The optional ``user_attribute_macros`` input (wire name ``userAttributeMacros``)."""
        return pulumi.get(self, "user_attribute_macros")

    @user_attribute_macros.setter
    def user_attribute_macros(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_attribute_macros", value)

    @property
    @pulumi.getter(name="userAttributeMappings")
    def user_attribute_mappings(self) -> Optional[pulumi.Input[str]]:
        """The optional ``user_attribute_mappings`` input (wire name ``userAttributeMappings``)."""
        return pulumi.get(self, "user_attribute_mappings")

    @user_attribute_mappings.setter
    def user_attribute_mappings(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_attribute_mappings", value)

    @property
    @pulumi.getter
    def values(self) -> Optional[pulumi.Input[str]]:
        """The optional ``values`` input."""
        return pulumi.get(self, "values")

    @values.setter
    def values(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "values", value)
@pulumi.input_type
class SmartHookConditionArgs:
    """Input type for one smart-hook condition: ``source`` compared to ``value`` via ``operator``."""

    def __init__(__self__, *,
                 operator: pulumi.Input[str],
                 source: pulumi.Input[str],
                 value: pulumi.Input[str]):
        """All three inputs are required and registered unconditionally."""
        for _name, _arg in (("operator", operator), ("source", source), ("value", value)):
            pulumi.set(__self__, _name, _arg)

    @property
    @pulumi.getter
    def operator(self) -> pulumi.Input[str]:
        """The ``operator`` input."""
        return pulumi.get(self, "operator")

    @operator.setter
    def operator(self, value: pulumi.Input[str]):
        pulumi.set(self, "operator", value)

    @property
    @pulumi.getter
    def source(self) -> pulumi.Input[str]:
        """The ``source`` input."""
        return pulumi.get(self, "source")

    @source.setter
    def source(self, value: pulumi.Input[str]):
        pulumi.set(self, "source", value)

    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        """The ``value`` input."""
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class SmartHookOptionsArgs:
    """Input type for smart-hook options; every field is an optional boolean flag."""

    def __init__(__self__, *,
                 location_enabled: Optional[pulumi.Input[bool]] = None,
                 mfa_device_info_enabled: Optional[pulumi.Input[bool]] = None,
                 risk_enabled: Optional[pulumi.Input[bool]] = None):
        """Each flag is registered only when the caller supplied a non-``None`` value."""
        for _name, _arg in (
                ("location_enabled", location_enabled),
                ("mfa_device_info_enabled", mfa_device_info_enabled),
                ("risk_enabled", risk_enabled)):
            if _arg is not None:
                pulumi.set(__self__, _name, _arg)

    @property
    @pulumi.getter(name="locationEnabled")
    def location_enabled(self) -> Optional[pulumi.Input[bool]]:
        """The optional ``location_enabled`` input (wire name ``locationEnabled``)."""
        return pulumi.get(self, "location_enabled")

    @location_enabled.setter
    def location_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "location_enabled", value)

    @property
    @pulumi.getter(name="mfaDeviceInfoEnabled")
    def mfa_device_info_enabled(self) -> Optional[pulumi.Input[bool]]:
        """The optional ``mfa_device_info_enabled`` input (wire name ``mfaDeviceInfoEnabled``)."""
        return pulumi.get(self, "mfa_device_info_enabled")

    @mfa_device_info_enabled.setter
    def mfa_device_info_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "mfa_device_info_enabled", value)

    @property
    @pulumi.getter(name="riskEnabled")
    def risk_enabled(self) -> Optional[pulumi.Input[bool]]:
        """The optional ``risk_enabled`` input (wire name ``riskEnabled``)."""
        return pulumi.get(self, "risk_enabled")

    @risk_enabled.setter
    def risk_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "risk_enabled", value)
@pulumi.input_type
class UserMappingActionArgs:
    """Input type for one user-mapping action: an ``action`` keyword plus its ``values``."""

    def __init__(__self__, *,
                 action: pulumi.Input[str],
                 values: pulumi.Input[Sequence[pulumi.Input[str]]]):
        """Both inputs are required and registered unconditionally."""
        for _name, _arg in (("action", action), ("values", values)):
            pulumi.set(__self__, _name, _arg)

    @property
    @pulumi.getter
    def action(self) -> pulumi.Input[str]:
        """The ``action`` input."""
        return pulumi.get(self, "action")

    @action.setter
    def action(self, value: pulumi.Input[str]):
        pulumi.set(self, "action", value)

    @property
    @pulumi.getter
    def values(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """The ``values`` input (list of strings)."""
        return pulumi.get(self, "values")

    @values.setter
    def values(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "values", value)
@pulumi.input_type
class UserMappingConditionArgs:
    """Input type for one user-mapping condition: ``source`` compared to ``value`` via ``operator``."""

    def __init__(__self__, *,
                 operator: pulumi.Input[str],
                 source: pulumi.Input[str],
                 value: pulumi.Input[str]):
        """All three inputs are required and registered unconditionally."""
        for _name, _arg in (("operator", operator), ("source", source), ("value", value)):
            pulumi.set(__self__, _name, _arg)

    @property
    @pulumi.getter
    def operator(self) -> pulumi.Input[str]:
        """The ``operator`` input."""
        return pulumi.get(self, "operator")

    @operator.setter
    def operator(self, value: pulumi.Input[str]):
        pulumi.set(self, "operator", value)

    @property
    @pulumi.getter
    def source(self) -> pulumi.Input[str]:
        """The ``source`` input."""
        return pulumi.get(self, "source")

    @source.setter
    def source(self, value: pulumi.Input[str]):
        pulumi.set(self, "source", value)

    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        """The ``value`` input."""
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
| 37.717822
| 105
| 0.674925
| 3,498
| 30,476
| 5.616924
| 0.037736
| 0.120928
| 0.082044
| 0.059955
| 0.905232
| 0.876883
| 0.851232
| 0.841103
| 0.825478
| 0.796061
| 0
| 0.000041
| 0.206523
| 30,476
| 807
| 106
| 37.76456
| 0.812464
| 0.005808
| 0
| 0.790199
| 1
| 0
| 0.120325
| 0.072393
| 0
| 0
| 0
| 0
| 0.041348
| 1
| 0.20827
| false
| 0
| 0.007657
| 0.094946
| 0.32925
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
e097de517b2c361d30ffe5d0741175110f0b0b79
| 39
|
py
|
Python
|
pytorch_utilities/loss/__init__.py
|
bradezard131/pytorch-utilities
|
4fd123df3911a33a8c02bf6e3b418c77892519a7
|
[
"MIT"
] | 1
|
2020-07-22T02:22:36.000Z
|
2020-07-22T02:22:36.000Z
|
pytorch_utilities/loss/__init__.py
|
bradezard131/pytorch-utilities
|
4fd123df3911a33a8c02bf6e3b418c77892519a7
|
[
"MIT"
] | null | null | null |
pytorch_utilities/loss/__init__.py
|
bradezard131/pytorch-utilities
|
4fd123df3911a33a8c02bf6e3b418c77892519a7
|
[
"MIT"
] | null | null | null |
from .loss.focal_loss import focal_loss
| 39
| 39
| 0.871795
| 7
| 39
| 4.571429
| 0.571429
| 0.5625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 39
| 1
| 39
| 39
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e0cae08069ecd355b1271d0d445165a0cbc5cf15
| 35,290
|
py
|
Python
|
Examples/FlowerDance/Scales.py
|
Authey/MidiUtilHelper
|
c03fb47fa09af15f0ba4fa15e1bb4a04b277ec0b
|
[
"MIT"
] | 1
|
2020-12-30T15:07:34.000Z
|
2020-12-30T15:07:34.000Z
|
Examples/SakuraTears/Scales.py
|
Authey/MidiUtilHelper
|
c03fb47fa09af15f0ba4fa15e1bb4a04b277ec0b
|
[
"MIT"
] | null | null | null |
Examples/SakuraTears/Scales.py
|
Authey/MidiUtilHelper
|
c03fb47fa09af15f0ba4fa15e1bb4a04b277ec0b
|
[
"MIT"
] | 1
|
2020-12-28T21:47:26.000Z
|
2020-12-28T21:47:26.000Z
|
# Author: Authey
# Date: 10/06/2020
# ------------------------------------------------------------------------------ #
# # || C | C# | D | D# | E | F | F# | G | G# | A | A# | B #
# ------------------------------------------------------------------------------ #
# 0 || 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 #
# 1 || 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 #
# 2 || 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 #
# 3 || 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 #
# 4 || 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 #
# 5 || 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 #
# 6 || 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 #
# 7 || 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 #
# 8 || 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 #
# 9 || 108 | 109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 #
# 10 || 120 | 121 | 122 | 123 | 124 | 125 | 126 | 127 | #
# ------------------------------------------------------------------------------ #
from midiutil import MIDIFile
class Scales:
    def __init__(self, tempo=120, volume=100):
        """Create a single-track MIDI builder.

        :param tempo: tempo in BPM written to the track at time 0.
        :param volume: note velocity used for every note, 0-127 per the MIDI standard.
        """
        self.tempo = tempo # In BPM
        self.track = 0
        self.time = 0  # running cursor (in beats); advanced by every note method
        self.volume = volume # 0-127, as per the MIDI standard
        self.channel = 0
        self.MyMIDI = MIDIFile(1) # One track, defaults to format 1
        self.MyMIDI.addTempo(self.track, self.time, self.tempo)
# ------------------------------------------ C ------------------------------------------- #
# ------------------------------- C0 ------------------------------- #
    def c0(self, duration=1):
        """Add a C0 note (MIDI pitch 0) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 0
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- C1 ------------------------------- #
    def c1(self, duration=1):
        """Add a C1 note (MIDI pitch 12) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 12
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
# ------------------------------- C2 ------------------------------- #
def c2_0d125(self, duration=1):
pitch = 24
self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
self.time += duration
# ------------------------------- C3 ------------------------------- #
    def c3(self, duration=1):
        """Add a C3 note (MIDI pitch 36) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 36
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- C4 ------------------------------- #
    def c4(self, duration=1):
        """Add a C4 note (MIDI pitch 48) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 48
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- C5 ------------------------------- #
    def c5(self, duration=1):
        """Add a C5 note (MIDI pitch 60) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 60
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- C6 ------------------------------- #
    def c6(self, duration=1):
        """Add a C6 note (MIDI pitch 72) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 72
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- C7 ------------------------------- #
    def c7(self, duration=1):
        """Add a C7 note (MIDI pitch 84) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 84
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- C8 ------------------------------- #
    def c8(self, duration=1):
        """Add a C8 note (MIDI pitch 96) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 96
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- C9 ------------------------------- #
    def c9(self, duration=1):
        """Add a C9 note (MIDI pitch 108) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 108
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- C10 ------------------------------- #
    def c10(self, duration=1):
        """Add a C10 note (MIDI pitch 120) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 120
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
# ----------------------------------------- C# ------------------------------------------- #
# ------------------------------- C#0 ------------------------------- #
    def c0s(self, duration=1):
        """Add a C#0 note (MIDI pitch 1) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 1
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- C#1 ------------------------------- #
    def c1s(self, duration=1):
        """Add a C#1 note (MIDI pitch 13) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 13
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- C#2 ------------------------------- #
    def c2s(self, duration=1):
        """Add a C#2 note (MIDI pitch 25) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 25
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- C#3 ------------------------------- #
    def c3s(self, duration=1):
        """Add a C#3 note (MIDI pitch 37) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 37
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- C#4 ------------------------------- #
    def c4s(self, duration=1):
        """Add a C#4 note (MIDI pitch 49) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 49
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- C#5 ------------------------------- #
    def c5s(self, duration=1):
        """Add a C#5 note (MIDI pitch 61) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 61
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- C#6 ------------------------------- #
    def c6s(self, duration=1):
        """Add a C#6 note (MIDI pitch 73) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 73
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- C#7 ------------------------------- #
    def c7s(self, duration=1):
        """Add a C#7 note (MIDI pitch 85) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 85
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- C#8 ------------------------------- #
    def c8s(self, duration=1):
        """Add a C#8 note (MIDI pitch 97) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 97
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- C#9 ------------------------------- #
    def c9s(self, duration=1):
        """Add a C#9 note (MIDI pitch 109) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 109
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- C#10 ------------------------------- #
    def c10s(self, duration=1):
        """Add a C#10 note (MIDI pitch 121) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 121
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
# ------------------------------------------ D ------------------------------------------- #
# ------------------------------- D0 ------------------------------- #
    def d0(self, duration=1):
        """Add a D0 note (MIDI pitch 2) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 2
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- D1 ------------------------------- #
    def d1(self, duration=1):
        """Add a D1 note (MIDI pitch 14) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 14
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- D2 ------------------------------- #
    def d2(self, duration=1):
        """Add a D2 note (MIDI pitch 26) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 26
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- D3 ------------------------------- #
    def d3(self, duration=1):
        """Add a D3 note (MIDI pitch 38) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 38
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- D4 ------------------------------- #
    def d4(self, duration=1):
        """Add a D4 note (MIDI pitch 50) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 50
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- D5 ------------------------------- #
    def d5(self, duration=1):
        """Add a D5 note (MIDI pitch 62) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 62
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- D6 ------------------------------- #
    def d6(self, duration=1):
        """Add a D6 note (MIDI pitch 74) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 74
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- D7 ------------------------------- #
    def d7(self, duration=1):
        """Add a D7 note (MIDI pitch 86) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 86
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- D8 ------------------------------- #
    def d8(self, duration=1):
        """Add a D8 note (MIDI pitch 98) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 98
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- D9 ------------------------------- #
    def d9(self, duration=1):
        """Add a D9 note (MIDI pitch 110) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 110
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- D10 ------------------------------- #
    def d10(self, duration=1):
        """Add a D10 note (MIDI pitch 122) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 122
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
# ------------------------------------------ D# ------------------------------------------- #
# ------------------------------- D#0 ------------------------------- #
    def d0s(self, duration=1):
        """Add a D#0 note (MIDI pitch 3) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 3
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- D#1 ------------------------------- #
    def d1s(self, duration=1):
        """Add a D#1 note (MIDI pitch 15) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 15
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- D#2 ------------------------------- #
    def d2s(self, duration=1):
        """Add a D#2 note (MIDI pitch 27) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 27
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- D#3 ------------------------------- #
    def d3s(self, duration=1):
        """Add a D#3 note (MIDI pitch 39) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 39
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- D#4 ------------------------------- #
    def d4s(self, duration=1):
        """Add a D#4 note (MIDI pitch 51) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 51
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- D#5 ------------------------------- #
    def d5s(self, duration=1):
        """Add a D#5 note (MIDI pitch 63) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 63
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- D#6 ------------------------------- #
    def d6s(self, duration=1):
        """Add a D#6 note (MIDI pitch 75) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 75
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- D#7 ------------------------------- #
    def d7s(self, duration=1):
        """Add a D#7 note (MIDI pitch 87) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 87
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- D#8 ------------------------------- #
    def d8s(self, duration=1):
        """Add a D#8 note (MIDI pitch 99) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 99
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- D#9 ------------------------------- #
    def d9s(self, duration=1):
        """Add a D#9 note (MIDI pitch 111) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 111
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
    # ------------------------------- D#10 ------------------------------- #
    def d10s(self, duration=1):
        """Add a D#10 note (MIDI pitch 123) at the current cursor, then advance the cursor by ``duration`` beats."""
        pitch = 123
        self.MyMIDI.addNote(self.track, self.channel, pitch, self.time, duration, self.volume)
        self.time += duration
# E notes, octaves 0-10: each call appends one note at the current time
# cursor and then advances the cursor.
def e0(self, duration=1):
    """Append an E0 note (MIDI pitch 4) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 4, self.time, duration, self.volume)
    self.time += duration

def e1(self, duration=1):
    """Append an E1 note (MIDI pitch 16) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 16, self.time, duration, self.volume)
    self.time += duration

def e2(self, duration=1):
    """Append an E2 note (MIDI pitch 28) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 28, self.time, duration, self.volume)
    self.time += duration

def e3(self, duration=1):
    """Append an E3 note (MIDI pitch 40) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 40, self.time, duration, self.volume)
    self.time += duration

def e4(self, duration=1):
    """Append an E4 note (MIDI pitch 52) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 52, self.time, duration, self.volume)
    self.time += duration

def e5(self, duration=1):
    """Append an E5 note (MIDI pitch 64) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 64, self.time, duration, self.volume)
    self.time += duration

def e6(self, duration=1):
    """Append an E6 note (MIDI pitch 76) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 76, self.time, duration, self.volume)
    self.time += duration

def e7(self, duration=1):
    """Append an E7 note (MIDI pitch 88) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 88, self.time, duration, self.volume)
    self.time += duration

def e8(self, duration=1):
    """Append an E8 note (MIDI pitch 100) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 100, self.time, duration, self.volume)
    self.time += duration

def e9(self, duration=1):
    """Append an E9 note (MIDI pitch 112) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 112, self.time, duration, self.volume)
    self.time += duration

def e10(self, duration=1):
    """Append an E10 note (MIDI pitch 124) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 124, self.time, duration, self.volume)
    self.time += duration
# F notes, octaves 0-10: each call appends one note at the current time
# cursor and then advances the cursor.
def f0(self, duration=1):
    """Append an F0 note (MIDI pitch 5) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 5, self.time, duration, self.volume)
    self.time += duration

def f1(self, duration=1):
    """Append an F1 note (MIDI pitch 17) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 17, self.time, duration, self.volume)
    self.time += duration

def f2(self, duration=1):
    """Append an F2 note (MIDI pitch 29) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 29, self.time, duration, self.volume)
    self.time += duration

def f3(self, duration=1):
    """Append an F3 note (MIDI pitch 41) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 41, self.time, duration, self.volume)
    self.time += duration

def f4(self, duration=1):
    """Append an F4 note (MIDI pitch 53) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 53, self.time, duration, self.volume)
    self.time += duration

def f5(self, duration=1):
    """Append an F5 note (MIDI pitch 65) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 65, self.time, duration, self.volume)
    self.time += duration

def f6(self, duration=1):
    """Append an F6 note (MIDI pitch 77) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 77, self.time, duration, self.volume)
    self.time += duration

def f7(self, duration=1):
    """Append an F7 note (MIDI pitch 89) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 89, self.time, duration, self.volume)
    self.time += duration

def f8(self, duration=1):
    """Append an F8 note (MIDI pitch 101) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 101, self.time, duration, self.volume)
    self.time += duration

def f9(self, duration=1):
    """Append an F9 note (MIDI pitch 113) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 113, self.time, duration, self.volume)
    self.time += duration

def f10(self, duration=1):
    """Append an F10 note (MIDI pitch 125) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 125, self.time, duration, self.volume)
    self.time += duration
# F-sharp notes, octaves 0-10 (method fNs plays F#N): each call appends one
# note at the current time cursor and then advances the cursor.
def f0s(self, duration=1):
    """Append an F#0 note (MIDI pitch 6) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 6, self.time, duration, self.volume)
    self.time += duration

def f1s(self, duration=1):
    """Append an F#1 note (MIDI pitch 18) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 18, self.time, duration, self.volume)
    self.time += duration

def f2s(self, duration=1):
    """Append an F#2 note (MIDI pitch 30) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 30, self.time, duration, self.volume)
    self.time += duration

def f3s(self, duration=1):
    """Append an F#3 note (MIDI pitch 42) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 42, self.time, duration, self.volume)
    self.time += duration

def f4s(self, duration=1):
    """Append an F#4 note (MIDI pitch 54) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 54, self.time, duration, self.volume)
    self.time += duration

def f5s(self, duration=1):
    """Append an F#5 note (MIDI pitch 66) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 66, self.time, duration, self.volume)
    self.time += duration

def f6s(self, duration=1):
    """Append an F#6 note (MIDI pitch 78) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 78, self.time, duration, self.volume)
    self.time += duration

def f7s(self, duration=1):
    """Append an F#7 note (MIDI pitch 90) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 90, self.time, duration, self.volume)
    self.time += duration

def f8s(self, duration=1):
    """Append an F#8 note (MIDI pitch 102) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 102, self.time, duration, self.volume)
    self.time += duration

def f9s(self, duration=1):
    """Append an F#9 note (MIDI pitch 114) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 114, self.time, duration, self.volume)
    self.time += duration

def f10s(self, duration=1):
    """Append an F#10 note (MIDI pitch 126) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 126, self.time, duration, self.volume)
    self.time += duration
# G notes, octaves 0-10: each call appends one note at the current time
# cursor and then advances the cursor.
def g0(self, duration=1):
    """Append a G0 note (MIDI pitch 7) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 7, self.time, duration, self.volume)
    self.time += duration

def g1(self, duration=1):
    """Append a G1 note (MIDI pitch 19) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 19, self.time, duration, self.volume)
    self.time += duration

def g2(self, duration=1):
    """Append a G2 note (MIDI pitch 31) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 31, self.time, duration, self.volume)
    self.time += duration

def g3(self, duration=1):
    """Append a G3 note (MIDI pitch 43) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 43, self.time, duration, self.volume)
    self.time += duration

def g4(self, duration=1):
    """Append a G4 note (MIDI pitch 55) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 55, self.time, duration, self.volume)
    self.time += duration

def g5(self, duration=1):
    """Append a G5 note (MIDI pitch 67) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 67, self.time, duration, self.volume)
    self.time += duration

def g6(self, duration=1):
    """Append a G6 note (MIDI pitch 79) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 79, self.time, duration, self.volume)
    self.time += duration

def g7(self, duration=1):
    """Append a G7 note (MIDI pitch 91) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 91, self.time, duration, self.volume)
    self.time += duration

def g8(self, duration=1):
    """Append a G8 note (MIDI pitch 103) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 103, self.time, duration, self.volume)
    self.time += duration

def g9(self, duration=1):
    """Append a G9 note (MIDI pitch 115) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 115, self.time, duration, self.volume)
    self.time += duration

def g10(self, duration=1):
    """Append a G10 note (MIDI pitch 127, the highest MIDI pitch) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 127, self.time, duration, self.volume)
    self.time += duration
# G-sharp notes, octaves 0-9 (method gNs plays G#N; G#10 would exceed the
# 0-127 MIDI range): each call appends one note and advances the cursor.
def g0s(self, duration=1):
    """Append a G#0 note (MIDI pitch 8) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 8, self.time, duration, self.volume)
    self.time += duration

def g1s(self, duration=1):
    """Append a G#1 note (MIDI pitch 20) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 20, self.time, duration, self.volume)
    self.time += duration

def g2s(self, duration=1):
    """Append a G#2 note (MIDI pitch 32) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 32, self.time, duration, self.volume)
    self.time += duration

def g3s(self, duration=1):
    """Append a G#3 note (MIDI pitch 44) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 44, self.time, duration, self.volume)
    self.time += duration

def g4s(self, duration=1):
    """Append a G#4 note (MIDI pitch 56) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 56, self.time, duration, self.volume)
    self.time += duration

def g5s(self, duration=1):
    """Append a G#5 note (MIDI pitch 68) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 68, self.time, duration, self.volume)
    self.time += duration

def g6s(self, duration=1):
    """Append a G#6 note (MIDI pitch 80) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 80, self.time, duration, self.volume)
    self.time += duration

def g7s(self, duration=1):
    """Append a G#7 note (MIDI pitch 92) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 92, self.time, duration, self.volume)
    self.time += duration

def g8s(self, duration=1):
    """Append a G#8 note (MIDI pitch 104) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 104, self.time, duration, self.volume)
    self.time += duration

def g9s(self, duration=1):
    """Append a G#9 note (MIDI pitch 116) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 116, self.time, duration, self.volume)
    self.time += duration
# A notes, octaves 0-9: each call appends one note at the current time
# cursor and then advances the cursor.
def a0(self, duration=1):
    """Append an A0 note (MIDI pitch 9) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 9, self.time, duration, self.volume)
    self.time += duration

def a1(self, duration=1):
    """Append an A1 note (MIDI pitch 21) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 21, self.time, duration, self.volume)
    self.time += duration

def a2(self, duration=1):
    """Append an A2 note (MIDI pitch 33) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 33, self.time, duration, self.volume)
    self.time += duration

def a3(self, duration=1):
    """Append an A3 note (MIDI pitch 45) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 45, self.time, duration, self.volume)
    self.time += duration

def a4(self, duration=1):
    """Append an A4 note (MIDI pitch 57) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 57, self.time, duration, self.volume)
    self.time += duration

def a5(self, duration=1):
    """Append an A5 note (MIDI pitch 69) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 69, self.time, duration, self.volume)
    self.time += duration

def a6(self, duration=1):
    """Append an A6 note (MIDI pitch 81) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 81, self.time, duration, self.volume)
    self.time += duration

def a7(self, duration=1):
    """Append an A7 note (MIDI pitch 93) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 93, self.time, duration, self.volume)
    self.time += duration

def a8(self, duration=1):
    """Append an A8 note (MIDI pitch 105) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 105, self.time, duration, self.volume)
    self.time += duration

def a9(self, duration=1):
    """Append an A9 note (MIDI pitch 117) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 117, self.time, duration, self.volume)
    self.time += duration
# A-sharp notes, octaves 0-9 (method aNs plays A#N): each call appends one
# note at the current time cursor and then advances the cursor.
def a0s(self, duration=1):
    """Append an A#0 note (MIDI pitch 10) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 10, self.time, duration, self.volume)
    self.time += duration

def a1s(self, duration=1):
    """Append an A#1 note (MIDI pitch 22) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 22, self.time, duration, self.volume)
    self.time += duration

def a2s(self, duration=1):
    """Append an A#2 note (MIDI pitch 34) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 34, self.time, duration, self.volume)
    self.time += duration

def a3s(self, duration=1):
    """Append an A#3 note (MIDI pitch 46) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 46, self.time, duration, self.volume)
    self.time += duration

def a4s(self, duration=1):
    """Append an A#4 note (MIDI pitch 58) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 58, self.time, duration, self.volume)
    self.time += duration

def a5s(self, duration=1):
    """Append an A#5 note (MIDI pitch 70) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 70, self.time, duration, self.volume)
    self.time += duration

def a6s(self, duration=1):
    """Append an A#6 note (MIDI pitch 82) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 82, self.time, duration, self.volume)
    self.time += duration

def a7s(self, duration=1):
    """Append an A#7 note (MIDI pitch 94) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 94, self.time, duration, self.volume)
    self.time += duration

def a8s(self, duration=1):
    """Append an A#8 note (MIDI pitch 106) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 106, self.time, duration, self.volume)
    self.time += duration

def a9s(self, duration=1):
    """Append an A#9 note (MIDI pitch 118) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 118, self.time, duration, self.volume)
    self.time += duration
# B notes, octaves 0-9: each call appends one note at the current time
# cursor and then advances the cursor.
def b0(self, duration=1):
    """Append a B0 note (MIDI pitch 11) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 11, self.time, duration, self.volume)
    self.time += duration

def b1(self, duration=1):
    """Append a B1 note (MIDI pitch 23) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 23, self.time, duration, self.volume)
    self.time += duration

def b2(self, duration=1):
    """Append a B2 note (MIDI pitch 35) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 35, self.time, duration, self.volume)
    self.time += duration

def b3(self, duration=1):
    """Append a B3 note (MIDI pitch 47) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 47, self.time, duration, self.volume)
    self.time += duration

def b4(self, duration=1):
    """Append a B4 note (MIDI pitch 59) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 59, self.time, duration, self.volume)
    self.time += duration

def b5(self, duration=1):
    """Append a B5 note (MIDI pitch 71) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 71, self.time, duration, self.volume)
    self.time += duration

def b6(self, duration=1):
    """Append a B6 note (MIDI pitch 83) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 83, self.time, duration, self.volume)
    self.time += duration

def b7(self, duration=1):
    """Append a B7 note (MIDI pitch 95) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 95, self.time, duration, self.volume)
    self.time += duration

def b8(self, duration=1):
    """Append a B8 note (MIDI pitch 107) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 107, self.time, duration, self.volume)
    self.time += duration

def b9(self, duration=1):
    """Append a B9 note (MIDI pitch 119) lasting *duration* beats."""
    self.MyMIDI.addNote(self.track, self.channel, 119, self.time, duration, self.volume)
    self.time += duration
def modify_time(self, duration=1, direction=0):
    """Shift the time cursor by *duration* beats without adding a note.

    direction 0 moves the cursor forward, 1 moves it backward; any other
    value is reported on stdout and ignored (matching existing behavior).
    """
    if direction not in (0, 1):
        print('Unknown Direction')
        return
    self.time += duration if direction == 0 else -duration
def get_midi(self):
    """Return the underlying MIDI object holding all notes added so far."""
    return self.MyMIDI
| 42.162485
| 97
| 0.425985
| 3,382
| 35,290
| 4.442933
| 0.09107
| 0.138427
| 0.274724
| 0.153334
| 0.719353
| 0.719353
| 0.719353
| 0.719353
| 0.719353
| 0.719353
| 0
| 0.035859
| 0.216889
| 35,290
| 836
| 98
| 42.212919
| 0.507852
| 0.31414
| 0
| 0.483992
| 0
| 0
| 0.000714
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.246704
| false
| 0
| 0.001883
| 0.001883
| 0.252354
| 0.001883
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e0ee7ebc042f63f795bf0abc9be69c0f33b9178c
| 1,429
|
py
|
Python
|
tests/integration/test_cli_compile.py
|
jorisvandenbossche/sphinx-theme-builder
|
6eb7129bded4e235ee0f4496477dda33aa9121e9
|
[
"MIT"
] | 1
|
2021-11-24T08:52:00.000Z
|
2021-11-24T08:52:00.000Z
|
tests/integration/test_cli_compile.py
|
jorisvandenbossche/sphinx-theme-builder
|
6eb7129bded4e235ee0f4496477dda33aa9121e9
|
[
"MIT"
] | null | null | null |
tests/integration/test_cli_compile.py
|
jorisvandenbossche/sphinx-theme-builder
|
6eb7129bded4e235ee0f4496477dda33aa9121e9
|
[
"MIT"
] | null | null | null |
from unittest import mock
from click import Group
from click.testing import CliRunner
class TestCompileCommand:
    """Integration tests for the ``compile`` CLI subcommand.

    Both tests patch out ``generate_assets`` and ``Project`` so no real
    project or asset pipeline is needed; they only verify the command
    wires its options through to ``generate_assets``.
    """

    def test_calls_generate_assets(self, runner: CliRunner, cli: Group) -> None:
        """``compile`` with no flags calls generate_assets with production=False."""
        with mock.patch(
            "sphinx_theme_builder._internal.cli.compile.generate_assets"
        ) as mocked_generate_assets, mock.patch(
            "sphinx_theme_builder._internal.cli.compile.Project"
        ) as mocked_project:
            with runner.isolated_filesystem():
                process = runner.invoke(cli, ["compile"])

        assert process.exit_code == 0, process
        mocked_generate_assets.assert_has_calls(
            [
                mock.call(mocked_project.from_cwd(), production=False),
            ]
        )

    def test_calls_generate_assets_in_production(
        self, runner: CliRunner, cli: Group
    ) -> None:
        """``compile --production`` calls generate_assets with production=True."""
        with mock.patch(
            "sphinx_theme_builder._internal.cli.compile.generate_assets"
        ) as mocked_generate_assets, mock.patch(
            "sphinx_theme_builder._internal.cli.compile.Project"
        ) as mocked_project:
            with runner.isolated_filesystem():
                process = runner.invoke(cli, ["compile", "--production"])

        assert process.exit_code == 0, process
        mocked_generate_assets.assert_has_calls(
            [
                mock.call(mocked_project.from_cwd(), production=True),
            ]
        )
| 33.232558
| 80
| 0.63331
| 152
| 1,429
| 5.664474
| 0.289474
| 0.130081
| 0.069686
| 0.092915
| 0.852497
| 0.792102
| 0.792102
| 0.792102
| 0.792102
| 0.792102
| 0
| 0.001938
| 0.277817
| 1,429
| 42
| 81
| 34.02381
| 0.832364
| 0
| 0
| 0.470588
| 1
| 0
| 0.169349
| 0.151155
| 0
| 0
| 0
| 0
| 0.117647
| 1
| 0.058824
| false
| 0
| 0.088235
| 0
| 0.176471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1cac222724a8ca6d15230ad4b91e567b49ff4e73
| 3,566
|
py
|
Python
|
tests/snapshots/snap_test_management_command_partition.py
|
MontyD/django-postgres-extra
|
9a79983c5e27510bf6dd60ef4a1441e7e2e4320d
|
[
"MIT"
] | null | null | null |
tests/snapshots/snap_test_management_command_partition.py
|
MontyD/django-postgres-extra
|
9a79983c5e27510bf6dd60ef4a1441e7e2e4320d
|
[
"MIT"
] | null | null | null |
tests/snapshots/snap_test_management_command_partition.py
|
MontyD/django-postgres-extra
|
9a79983c5e27510bf6dd60ef4a1441e7e2e4320d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import GenericRepr, Snapshot
snapshots = Snapshot()
snapshots['test_management_command_partition_auto_confirm[--yes] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nOperations applied.\\n', err='')")
snapshots['test_management_command_partition_auto_confirm[-y] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nOperations applied.\\n', err='')")
snapshots['test_management_command_partition_confirm_no[NO] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nDo you want to proceed? (y/N) Operation aborted.\\n', err='')")
snapshots['test_management_command_partition_confirm_no[N] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nDo you want to proceed? (y/N) Operation aborted.\\n', err='')")
snapshots['test_management_command_partition_confirm_no[No] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nDo you want to proceed? (y/N) Operation aborted.\\n', err='')")
snapshots['test_management_command_partition_confirm_no[n] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nDo you want to proceed? (y/N) Operation aborted.\\n', err='')")
snapshots['test_management_command_partition_confirm_no[no] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nDo you want to proceed? (y/N) Operation aborted.\\n', err='')")
snapshots['test_management_command_partition_confirm_yes[YES] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nDo you want to proceed? (y/N) Operations applied.\\n', err='')")
snapshots['test_management_command_partition_confirm_yes[Y] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nDo you want to proceed? (y/N) Operations applied.\\n', err='')")
snapshots['test_management_command_partition_confirm_yes[y] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nDo you want to proceed? (y/N) Operations applied.\\n', err='')")
snapshots['test_management_command_partition_confirm_yes[yes] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\nDo you want to proceed? (y/N) Operations applied.\\n', err='')")
snapshots['test_management_command_partition_dry_run[--dry] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\n', err='')")
snapshots['test_management_command_partition_dry_run[-d] 1'] = GenericRepr("CaptureResult(out='test:\\n - tobedeleted\\n + tobecreated\\n\\n1 partitions will be deleted\\n1 partitions will be created\\n', err='')")
| 101.885714
| 273
| 0.747336
| 496
| 3,566
| 5.231855
| 0.104839
| 0.120231
| 0.160308
| 0.180347
| 0.948748
| 0.948748
| 0.948748
| 0.925241
| 0.925241
| 0.905588
| 0
| 0.01306
| 0.098149
| 3,566
| 34
| 274
| 104.882353
| 0.793843
| 0.017386
| 0
| 0
| 0
| 0.8125
| 0.844616
| 0.280206
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
e82eb22bd663b83750f9b6beda490a4e451e49fa
| 3,008
|
py
|
Python
|
NS3-master/src/dsr/bindings/callbacks_list.py
|
legendPerceptor/blockchain
|
615ba331ae5ec53c683dfe6a16992a5181be0fea
|
[
"Apache-2.0"
] | 1
|
2021-09-20T07:05:25.000Z
|
2021-09-20T07:05:25.000Z
|
NS3-master/src/dsr/bindings/callbacks_list.py
|
legendPerceptor/blockchain
|
615ba331ae5ec53c683dfe6a16992a5181be0fea
|
[
"Apache-2.0"
] | null | null | null |
NS3-master/src/dsr/bindings/callbacks_list.py
|
legendPerceptor/blockchain
|
615ba331ae5ec53c683dfe6a16992a5181be0fea
|
[
"Apache-2.0"
] | 2
|
2021-09-02T08:25:16.000Z
|
2022-01-03T08:48:38.000Z
|
callback_classes = [
['ns3::ObjectBase *', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::Socket>', 'const ns3::Address &', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'const ns3::Address &', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<const ns3::Packet>', 'unsigned short', 'const ns3::Address &', 'const ns3::Address &', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Packet>', 'ns3::Ipv4Address', 'ns3::Ipv4Address', 'unsigned char', 'ns3::Ptr<ns3::Ipv4Route>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Packet>', 'ns3::Ipv6Address', 'ns3::Ipv6Address', 'unsigned char', 'ns3::Ptr<ns3::Ipv6Route>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'const ns3::Ipv4Header &', 'ns3::Ptr<const ns3::Packet>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<const ns3::Packet>', 'ns3::Ptr<ns3::Ipv4>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'const ns3::Ipv4Header &', 'ns3::Ptr<const ns3::Packet>', 'ns3::Ipv4L3Protocol::DropReason', 'ns3::Ptr<ns3::Ipv4>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Mac48Address', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<const ns3::Packet>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<const ns3::ArpCache>', 'ns3::Ipv4Address', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'const ns3::WifiMacHeader &', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ipv4Address', 'unsigned char', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'const ns3::dsr::DsrOptionSRHeader &', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
]
| 143.238095
| 210
| 0.58012
| 398
| 3,008
| 4.38191
| 0.077889
| 0.545872
| 0.637041
| 0.926606
| 0.865826
| 0.820528
| 0.804472
| 0.804472
| 0.804472
| 0.804472
| 0
| 0.06946
| 0.095412
| 3,008
| 20
| 211
| 150.4
| 0.571481
| 0
| 0
| 0
| 0
| 0
| 0.717088
| 0.101729
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
08fc6d9441bc81f3079b9ecc97aa5d7a5ee1aed5
| 100
|
py
|
Python
|
src/server/models/__init__.py
|
daniel3303/sirs-project
|
38a36ecf2373775c3a866f185dacb7597ad1e3cc
|
[
"Apache-2.0"
] | null | null | null |
src/server/models/__init__.py
|
daniel3303/sirs-project
|
38a36ecf2373775c3a866f185dacb7597ad1e3cc
|
[
"Apache-2.0"
] | null | null | null |
src/server/models/__init__.py
|
daniel3303/sirs-project
|
38a36ecf2373775c3a866f185dacb7597ad1e3cc
|
[
"Apache-2.0"
] | null | null | null |
from server.models.User import *
from server.models.File import *
from server.models.Role import *
| 25
| 33
| 0.78
| 15
| 100
| 5.2
| 0.466667
| 0.384615
| 0.615385
| 0.564103
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.13
| 100
| 3
| 34
| 33.333333
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
08fcd105ac2911e02ad4062147f1cae2f7a22486
| 15,654
|
py
|
Python
|
tests/test_serializers.py
|
scieloorg/oai-pmh
|
9d3044921d2d5cafb18e54f04070e8783f49c06d
|
[
"BSD-2-Clause"
] | 2
|
2019-03-16T04:40:29.000Z
|
2022-03-10T14:50:21.000Z
|
tests/test_serializers.py
|
DalavanCloud/oai-pmh
|
9d3044921d2d5cafb18e54f04070e8783f49c06d
|
[
"BSD-2-Clause"
] | 27
|
2017-08-23T17:11:57.000Z
|
2021-06-01T21:57:31.000Z
|
tests/test_serializers.py
|
DalavanCloud/oai-pmh
|
9d3044921d2d5cafb18e54f04070e8783f49c06d
|
[
"BSD-2-Clause"
] | 2
|
2017-06-12T16:18:35.000Z
|
2019-03-16T04:40:12.000Z
|
import unittest
from unittest.mock import patch
from datetime import datetime
from oaipmh import serializers, validators, formatters
class SchemaValidatorMixin:
    """Mixin providing an assertion that XML bytes pass OAI schema validation."""

    def assertXMLIsValid(self, xml_bytes):
        """Fail the current test unless *xml_bytes* validates against the OAI schema."""
        ok, errors = validators.OAIValidator(xml_bytes).validate()
        if ok:
            return
        raise self.failureException('the XML is invalid: %s' % errors)
class MakeIdentifyTests(SchemaValidatorMixin, unittest.TestCase):
    """Tests for ``serializers.serialize_identify``."""

    def setUp(self):
        repository = {
            'repositoryName': 'SciELO Brazil',
            'baseURL': 'https://oai.scielo.br/',
            'protocolVersion': '2.0',
            'adminEmail': 'scielo-dev@googlegroups.com',
            'earliestDatestamp': datetime(1909, 4, 1),
            'deletedRecord': 'no',
            'granularity': 'YYYY-MM-DD',
        }
        self.data = {
            'repository': repository,
            'request': {'verb': 'Identify'},
        }

    @patch('oaipmh.serializers.datetime')
    def test_correct_usage(self, dt_mock):
        # Pin responseDate so the serialized output is reproducible.
        dt_mock.utcnow.return_value = datetime(2017, 6, 22, 19, 1, 43)
        expected = b'<?xml version=\'1.0\' encoding=\'utf-8\'?>\n<OAI-PMH xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://www.openarchives.org/OAI/2.0/" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/ http://www.openarchives.org/OAI/2.0/OAI-PMH.xsd"><responseDate>2017-06-22T19:01:43Z</responseDate><request verb="Identify">https://oai.scielo.br/</request><Identify><repositoryName>SciELO Brazil</repositoryName><baseURL>https://oai.scielo.br/</baseURL><protocolVersion>2.0</protocolVersion><adminEmail>scielo-dev@googlegroups.com</adminEmail><earliestDatestamp>1909-04-01</earliestDatestamp><deletedRecord>no</deletedRecord><granularity>YYYY-MM-DD</granularity></Identify></OAI-PMH>'
        got = serializers.serialize_identify(self.data)
        self.assertEqual(expected, got)

    def test_xml_validity(self):
        # The serialized document must validate against the OAI-PMH schema.
        self.assertXMLIsValid(serializers.serialize_identify(self.data))
class MakeListMetadataFormatsTests(SchemaValidatorMixin, unittest.TestCase):
    """Tests for ``serializers.serialize_list_metadata_formats``."""

    def setUp(self):
        repository = {
            'repositoryName': 'SciELO Brazil',
            'baseURL': 'https://oai.scielo.br/',
            'protocolVersion': '2.0',
            'adminEmail': 'scielo-dev@googlegroups.com',
            'earliestDatestamp': datetime(1909, 4, 1),
            'deletedRecord': 'no',
            'granularity': 'YYYY-MM-DD',
        }
        oai_dc_format = {
            'metadataPrefix': 'oai_dc',
            'schema': 'http://www.openarchives.org/OAI/2.0/oai_dc.xsd',
            'metadataNamespace': 'http://www.openarchives.org/OAI/2.0/oai_dc/',
        }
        self.data = {
            'repository': repository,
            'request': {'verb': 'ListMetadataFormats'},
            'formats': [oai_dc_format],
        }

    @patch('oaipmh.serializers.datetime')
    def test_correct_usage(self, dt_mock):
        # Pin responseDate so the serialized output is reproducible.
        dt_mock.utcnow.return_value = datetime(2017, 6, 22, 19, 1, 43)
        expected = b'<?xml version=\'1.0\' encoding=\'utf-8\'?>\n<OAI-PMH xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://www.openarchives.org/OAI/2.0/" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/ http://www.openarchives.org/OAI/2.0/OAI-PMH.xsd"><responseDate>2017-06-22T19:01:43Z</responseDate><request verb="ListMetadataFormats">https://oai.scielo.br/</request><ListMetadataFormats><metadataFormat><metadataPrefix>oai_dc</metadataPrefix><schema>http://www.openarchives.org/OAI/2.0/oai_dc.xsd</schema><metadataNamespace>http://www.openarchives.org/OAI/2.0/oai_dc/</metadataNamespace></metadataFormat></ListMetadataFormats></OAI-PMH>'
        got = serializers.serialize_list_metadata_formats(self.data)
        self.assertEqual(expected, got)

    def test_xml_validity(self):
        # The serialized document must validate against the OAI-PMH schema.
        self.assertXMLIsValid(serializers.serialize_list_metadata_formats(self.data))
class MakeListIdentifiersTests(SchemaValidatorMixin, unittest.TestCase):
    """Tests for ``serializers.serialize_list_identifiers``."""

    def setUp(self):
        repository = {
            'repositoryName': 'SciELO Brazil',
            'baseURL': 'https://oai.scielo.br/',
            'protocolVersion': '2.0',
            'adminEmail': 'scielo-dev@googlegroups.com',
            'earliestDatestamp': datetime(1909, 4, 1),
            'deletedRecord': 'no',
            'granularity': 'YYYY-MM-DD',
        }
        resource = {
            'ridentifier': 'oai:arXiv:cs/0112017',
            'datestamp': datetime(2017, 6, 14),
            'setspec': ['set1', 'set2'],
            'title': [('en', 'MICROBIAL COUNTS OF DARK RED...')],
            'creator': ['Vieira, Francisco Cleber Sousa'],
            'subject': [('en', 'bacteria'), ('pt', 'bactéria')],
            'description': [('en', 'The number of colony forming units (CFU)...')],
            'publisher': ['Sociedade Brasileira de Microbiologia'],
            'contributor': ['Evans, R. J.'],
            'date': ['1998-09-01'],
            'type': ['research-article'],
            'format': ['text/html'],
            'identifier': ['https://ref.scielo.org/7vy47j'],
            'source': ['Revista de Microbiologia v.29 n.3 1998'],
            'language': ['en'],
            'relation': [],
            'rights': ['http://creativecommons.org/licenses/by-nc/4.0/'],
        }
        self.data = {
            'repository': repository,
            'request': {'verb': 'ListIdentifiers'},
            'resources': [resource],
        }

    @patch('oaipmh.serializers.datetime')
    def test_correct_usage(self, dt_mock):
        # Pin responseDate so the serialized output is reproducible; only the
        # header part of the resource is expected in a ListIdentifiers response.
        dt_mock.utcnow.return_value = datetime(2017, 6, 22, 19, 1, 43)
        expected = b'<?xml version=\'1.0\' encoding=\'utf-8\'?>\n<OAI-PMH xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://www.openarchives.org/OAI/2.0/" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/ http://www.openarchives.org/OAI/2.0/OAI-PMH.xsd"><responseDate>2017-06-22T19:01:43Z</responseDate><request verb="ListIdentifiers">https://oai.scielo.br/</request><ListIdentifiers><header><identifier>oai:arXiv:cs/0112017</identifier><datestamp>2017-06-14</datestamp><setSpec>set1</setSpec><setSpec>set2</setSpec></header><resumptionToken></resumptionToken></ListIdentifiers></OAI-PMH>'
        got = serializers.serialize_list_identifiers(self.data)
        self.assertEqual(expected, got)

    def test_xml_validity(self):
        # The serialized document must validate against the OAI-PMH schema.
        self.assertXMLIsValid(serializers.serialize_list_identifiers(self.data))
class MakeListRecordsTests(SchemaValidatorMixin, unittest.TestCase):
    """Tests for ``serializers.serialize_list_records``."""

    def setUp(self):
        repository = {
            'repositoryName': 'SciELO Brazil',
            'baseURL': 'https://oai.scielo.br/',
            'protocolVersion': '2.0',
            'adminEmail': 'scielo-dev@googlegroups.com',
            'earliestDatestamp': datetime(1909, 4, 1),
            'deletedRecord': 'no',
            'granularity': 'YYYY-MM-DD',
        }
        resource = {
            'ridentifier': 'oai:arXiv:cs/0112017',
            'datestamp': datetime(2017, 6, 14),
            'setspec': ['set1', 'set2'],
            'title': [('en', 'MICROBIAL COUNTS OF DARK RED...')],
            'creator': ['Vieira, Francisco Cleber Sousa'],
            'subject': [('en', 'bacteria'), ('pt', 'bactéria')],
            'description': [('en', 'The number of colony forming units (CFU)...')],
            'publisher': ['Sociedade Brasileira de Microbiologia'],
            'contributor': ['Evans, R. J.'],
            'date': [datetime(1998, 9, 1)],
            'type': ['research-article'],
            'format': ['text/html'],
            'identifier': ['https://ref.scielo.org/7vy47j'],
            'source': ['Revista de Microbiologia v.29 n.3 1998'],
            'language': ['en'],
            'relation': [],
            'rights': ['http://creativecommons.org/licenses/by-nc/4.0/'],
        }
        self.data = {
            'repository': repository,
            'request': {'verb': 'ListRecords'},
            'resources': [resource],
        }

    @patch('oaipmh.serializers.datetime')
    def test_correct_usage(self, dt_mock):
        # Pin responseDate so the serialized output is reproducible; records
        # carry both the header and the oai_dc metadata payload.
        dt_mock.utcnow.return_value = datetime(2017, 6, 22, 19, 1, 43)
        expected = b'<?xml version=\'1.0\' encoding=\'utf-8\'?>\n<OAI-PMH xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://www.openarchives.org/OAI/2.0/" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/ http://www.openarchives.org/OAI/2.0/OAI-PMH.xsd"><responseDate>2017-06-22T19:01:43Z</responseDate><request verb="ListRecords">https://oai.scielo.br/</request><ListRecords><record><header><identifier>oai:arXiv:cs/0112017</identifier><datestamp>2017-06-14</datestamp><setSpec>set1</setSpec><setSpec>set2</setSpec></header><metadata><oai_dc:dc xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/ http://www.openarchives.org/OAI/2.0/oai_dc.xsd"><dc:title>MICROBIAL COUNTS OF DARK RED...</dc:title><dc:creator>Vieira, Francisco Cleber Sousa</dc:creator><dc:contributor>Evans, R. J.</dc:contributor><dc:description>The number of colony forming units (CFU)...</dc:description><dc:subject>bacteria</dc:subject><dc:subject>bact\xc3\xa9ria</dc:subject><dc:publisher>Sociedade Brasileira de Microbiologia</dc:publisher><dc:date>1998-09-01</dc:date><dc:type>research-article</dc:type><dc:source>Revista de Microbiologia v.29 n.3 1998</dc:source><dc:format>text/html</dc:format><dc:identifier>https://ref.scielo.org/7vy47j</dc:identifier><dc:rights>http://creativecommons.org/licenses/by-nc/4.0/</dc:rights><dc:language>en</dc:language></oai_dc:dc></metadata></record><resumptionToken></resumptionToken></ListRecords></OAI-PMH>'
        got = serializers.serialize_list_records(self.data, formatters.oai_dc.make_metadata)
        self.assertEqual(expected, got)

    def test_xml_validity(self):
        # The serialized document must validate against the OAI-PMH schema.
        self.assertXMLIsValid(
            serializers.serialize_list_records(self.data, formatters.oai_dc.make_metadata))
class MakeGetRecordTests(SchemaValidatorMixin, unittest.TestCase):
    """Tests for ``serializers.serialize_get_record``."""

    def setUp(self):
        repository = {
            'repositoryName': 'SciELO Brazil',
            'baseURL': 'https://oai.scielo.br/',
            'protocolVersion': '2.0',
            'adminEmail': 'scielo-dev@googlegroups.com',
            'earliestDatestamp': datetime(1909, 4, 1),
            'deletedRecord': 'no',
            'granularity': 'YYYY-MM-DD',
        }
        resource = {
            'ridentifier': 'oai:arXiv:cs/0112017',
            'datestamp': datetime(2017, 6, 14),
            'setspec': ['set1', 'set2'],
            'title': [('en', 'MICROBIAL COUNTS OF DARK RED...')],
            'creator': ['Vieira, Francisco Cleber Sousa'],
            'subject': [('en', 'bacteria'), ('pt', 'bactéria')],
            'description': [('en', 'The number of colony forming units (CFU)...')],
            'publisher': ['Sociedade Brasileira de Microbiologia'],
            'contributor': ['Evans, R. J.'],
            'date': [datetime(1998, 9, 1)],
            'type': ['research-article'],
            'format': ['text/html'],
            'identifier': ['https://ref.scielo.org/7vy47j'],
            'source': ['Revista de Microbiologia v.29 n.3 1998'],
            'language': ['en'],
            'relation': [],
            'rights': ['http://creativecommons.org/licenses/by-nc/4.0/'],
        }
        self.data = {
            'repository': repository,
            'request': {'verb': 'GetRecord'},
            'resources': [resource],
        }

    @patch('oaipmh.serializers.datetime')
    def test_correct_usage(self, dt_mock):
        # Pin responseDate so the serialized output is reproducible.
        dt_mock.utcnow.return_value = datetime(2017, 6, 22, 19, 1, 43)
        expected = b'<?xml version=\'1.0\' encoding=\'utf-8\'?>\n<OAI-PMH xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://www.openarchives.org/OAI/2.0/" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/ http://www.openarchives.org/OAI/2.0/OAI-PMH.xsd"><responseDate>2017-06-22T19:01:43Z</responseDate><request verb="GetRecord">https://oai.scielo.br/</request><GetRecord><record><header><identifier>oai:arXiv:cs/0112017</identifier><datestamp>2017-06-14</datestamp><setSpec>set1</setSpec><setSpec>set2</setSpec></header><metadata><oai_dc:dc xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/ http://www.openarchives.org/OAI/2.0/oai_dc.xsd"><dc:title>MICROBIAL COUNTS OF DARK RED...</dc:title><dc:creator>Vieira, Francisco Cleber Sousa</dc:creator><dc:contributor>Evans, R. J.</dc:contributor><dc:description>The number of colony forming units (CFU)...</dc:description><dc:subject>bacteria</dc:subject><dc:subject>bact\xc3\xa9ria</dc:subject><dc:publisher>Sociedade Brasileira de Microbiologia</dc:publisher><dc:date>1998-09-01</dc:date><dc:type>research-article</dc:type><dc:source>Revista de Microbiologia v.29 n.3 1998</dc:source><dc:format>text/html</dc:format><dc:identifier>https://ref.scielo.org/7vy47j</dc:identifier><dc:rights>http://creativecommons.org/licenses/by-nc/4.0/</dc:rights><dc:language>en</dc:language></oai_dc:dc></metadata></record></GetRecord></OAI-PMH>'
        got = serializers.serialize_get_record(self.data, formatters.oai_dc.make_metadata)
        self.assertEqual(expected, got)

    def test_xml_validity(self):
        # The serialized document must validate against the OAI-PMH schema.
        self.assertXMLIsValid(
            serializers.serialize_get_record(self.data, formatters.oai_dc.make_metadata))
class MakeListSetsTests(SchemaValidatorMixin, unittest.TestCase):
    """Tests for ``serializers.serialize_list_sets``."""

    def setUp(self):
        repository = {
            'repositoryName': 'SciELO Brazil',
            'baseURL': 'https://oai.scielo.br/',
            'protocolVersion': '2.0',
            'adminEmail': 'scielo-dev@googlegroups.com',
            'earliestDatestamp': datetime(1909, 4, 1),
            'deletedRecord': 'no',
            'granularity': 'YYYY-MM-DD',
        }
        self.data = {
            'repository': repository,
            'request': {'verb': 'ListSets'},
            'sets': [{'setSpec': 'foo', 'setName': 'bar'}],
        }

    @patch('oaipmh.serializers.datetime')
    def test_correct_usage(self, dt_mock):
        # Pin responseDate so the serialized output is reproducible.
        dt_mock.utcnow.return_value = datetime(2017, 6, 22, 19, 1, 43)
        expected = b'<?xml version=\'1.0\' encoding=\'utf-8\'?>\n<OAI-PMH xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://www.openarchives.org/OAI/2.0/" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/ http://www.openarchives.org/OAI/2.0/OAI-PMH.xsd"><responseDate>2017-06-22T19:01:43Z</responseDate><request verb="ListSets">https://oai.scielo.br/</request><ListSets><set><setSpec>foo</setSpec><setName>bar</setName></set><resumptionToken></resumptionToken></ListSets></OAI-PMH>'
        got = serializers.serialize_list_sets(self.data)
        self.assertEqual(expected, got)

    def test_xml_validity(self):
        # The serialized document must validate against the OAI-PMH schema.
        self.assertXMLIsValid(serializers.serialize_list_sets(self.data))
| 61.873518
| 1,560
| 0.596716
| 1,736
| 15,654
| 5.325461
| 0.117512
| 0.007572
| 0.057545
| 0.066631
| 0.872039
| 0.84835
| 0.843375
| 0.830827
| 0.80941
| 0.80941
| 0
| 0.043308
| 0.231506
| 15,654
| 252
| 1,561
| 62.119048
| 0.725187
| 0
| 0
| 0.71875
| 0
| 0.026786
| 0.533444
| 0.06478
| 0
| 0
| 0
| 0
| 0.058036
| 1
| 0.084821
| false
| 0
| 0.017857
| 0
| 0.133929
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
1c07531d91e8c33e79c7bfb62b5613820854b6fd
| 1,407
|
py
|
Python
|
biobb_analysis/test/unitests/test_ambertools/test_cpptraj_rms.py
|
bioexcel/biobb_analysis
|
794683daf65eb13ddaaaf6cf3c19da6d1322a949
|
[
"Apache-2.0"
] | 3
|
2019-05-18T14:52:30.000Z
|
2020-10-18T06:20:00.000Z
|
biobb_analysis/test/unitests/test_ambertools/test_cpptraj_rms.py
|
bioexcel/biobb_analysis
|
794683daf65eb13ddaaaf6cf3c19da6d1322a949
|
[
"Apache-2.0"
] | 7
|
2019-03-04T15:04:28.000Z
|
2021-06-17T10:57:25.000Z
|
biobb_analysis/test/unitests/test_ambertools/test_cpptraj_rms.py
|
bioexcel/biobb_analysis
|
794683daf65eb13ddaaaf6cf3c19da6d1322a949
|
[
"Apache-2.0"
] | null | null | null |
from biobb_common.tools import test_fixtures as fx
from biobb_analysis.ambertools.cpptraj_rms import cpptraj_rms
class TestCpptrajRmsFirst():
    # Runs the cpptraj_rms building block against the 'cpptraj_rms_first'
    # fixture and compares the output file with the stored reference.
    # NOTE(review): camelCase setUp/tearDown are unittest.TestCase hooks; on a
    # plain class, plain pytest auto-invokes setup_method/teardown_method (or
    # nose-style setup/teardown) instead -- confirm which runner calls these.
    def setUp(self):
        # Populates self.properties and self.paths from the named fixture.
        fx.test_setup(self,'cpptraj_rms_first')
    def tearDown(self):
        # Removes temporary files created by the fixture.
        fx.test_teardown(self)
        pass
    def test_rms_first(self):
        # Execute the block, then check the output exists and matches the reference.
        cpptraj_rms(properties=self.properties, **self.paths)
        assert fx.not_empty(self.paths['output_cpptraj_path'])
        assert fx.equal(self.paths['output_cpptraj_path'], self.paths['ref_output_cpptraj_path'])
class TestCpptrajRmsAverage():
    # Runs the cpptraj_rms building block against the 'cpptraj_rms_average'
    # fixture and compares the output file with the stored reference.
    # NOTE(review): camelCase setUp/tearDown are unittest.TestCase hooks; on a
    # plain class, plain pytest auto-invokes setup_method/teardown_method (or
    # nose-style setup/teardown) instead -- confirm which runner calls these.
    def setUp(self):
        # Populates self.properties and self.paths from the named fixture.
        fx.test_setup(self,'cpptraj_rms_average')
    def tearDown(self):
        # Removes temporary files created by the fixture.
        fx.test_teardown(self)
        pass
    def test_rms_average(self):
        # Execute the block, then check the output exists and matches the reference.
        cpptraj_rms(properties=self.properties, **self.paths)
        assert fx.not_empty(self.paths['output_cpptraj_path'])
        assert fx.equal(self.paths['output_cpptraj_path'], self.paths['ref_output_cpptraj_path'])
class TestCpptrajRmsExperimental():
    # Runs the cpptraj_rms building block against the 'cpptraj_rms_experimental'
    # fixture and compares the output file with the stored reference.
    # NOTE(review): camelCase setUp/tearDown are unittest.TestCase hooks; on a
    # plain class, plain pytest auto-invokes setup_method/teardown_method (or
    # nose-style setup/teardown) instead -- confirm which runner calls these.
    def setUp(self):
        # Populates self.properties and self.paths from the named fixture.
        fx.test_setup(self,'cpptraj_rms_experimental')
    def tearDown(self):
        # Removes temporary files created by the fixture.
        fx.test_teardown(self)
        pass
    def test_rms_experimental(self):
        # Execute the block, then check the output exists and matches the reference.
        cpptraj_rms(properties=self.properties, **self.paths)
        assert fx.not_empty(self.paths['output_cpptraj_path'])
        assert fx.equal(self.paths['output_cpptraj_path'], self.paths['ref_output_cpptraj_path'])
| 32.72093
| 97
| 0.7086
| 183
| 1,407
| 5.174863
| 0.185792
| 0.114044
| 0.161563
| 0.139388
| 0.774023
| 0.774023
| 0.774023
| 0.774023
| 0.774023
| 0.656811
| 0
| 0
| 0.180526
| 1,407
| 42
| 98
| 33.5
| 0.821336
| 0
| 0
| 0.65625
| 0
| 0
| 0.172708
| 0.066098
| 0
| 0
| 0
| 0
| 0.1875
| 1
| 0.28125
| false
| 0.09375
| 0.0625
| 0
| 0.4375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
1c8152edbd7f89bf44d67576929f62f77c41a708
| 43,435
|
py
|
Python
|
pynetdicom/tests/test_dimse_c.py
|
RandaNP/pynetdicom
|
18bd3aa92a6a2b23b2ba6e62e1c4c2b324360d7c
|
[
"MIT"
] | null | null | null |
pynetdicom/tests/test_dimse_c.py
|
RandaNP/pynetdicom
|
18bd3aa92a6a2b23b2ba6e62e1c4c2b324360d7c
|
[
"MIT"
] | null | null | null |
pynetdicom/tests/test_dimse_c.py
|
RandaNP/pynetdicom
|
18bd3aa92a6a2b23b2ba6e62e1c4c2b324360d7c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Test DIMSE-C operations."""
from io import BytesIO
import logging
import pytest
from pydicom.dataset import Dataset
from pydicom.uid import UID
from pynetdicom import _config
from pynetdicom.dimse_messages import (
C_STORE_RQ, C_STORE_RSP,C_MOVE_RQ, C_MOVE_RSP, C_ECHO_RQ, C_ECHO_RSP,
C_FIND_RQ, C_FIND_RSP, C_GET_RQ, C_GET_RSP
)
from pynetdicom.dimse_primitives import (
C_ECHO, C_MOVE, C_STORE, C_GET, C_FIND, C_CANCEL
)
from pynetdicom.dsutils import encode
from pynetdicom.utils import validate_ae_title
#from pynetdicom.utils import pretty_bytes
from .encoded_dimse_msg import (
c_echo_rq_cmd, c_echo_rsp_cmd, c_store_rq_cmd_b, c_store_rq_ds_b,
c_store_rsp_cmd, c_find_rq_cmd, c_find_rq_ds, c_find_rsp_cmd,
c_find_rsp_ds, c_get_rq_cmd, c_get_rq_ds, c_get_rsp_cmd, c_get_rsp_ds,
c_move_rq_cmd, c_move_rq_ds, c_move_rsp_cmd, c_move_rsp_ds
)
# Silence pynetdicom's logger so intentional error paths don't spam test output.
LOGGER = logging.getLogger('pynetdicom')
LOGGER.setLevel(logging.CRITICAL)
class TestPrimitive_C_CANCEL(object):
    """Test DIMSE C-CANCEL operations."""

    def test_assignment(self):
        """Valid values are stored; out-of-range or wrongly-typed ones raise."""
        cancel = C_CANCEL()
        cancel.MessageIDBeingRespondedTo = 13
        assert 13 == cancel.MessageIDBeingRespondedTo
        # Out-of-range message ID is rejected with ValueError.
        with pytest.raises(ValueError):
            cancel.MessageIDBeingRespondedTo = 100000
        # Non-integer value is rejected with TypeError.
        with pytest.raises(TypeError):
            cancel.MessageIDBeingRespondedTo = 'test'
class TestPrimitive_C_STORE(object):
    """Test DIMSE C-STORE operations."""
    def setup(self):
        # Snapshot module-level config so individual tests may mutate it.
        self.default_conformance = _config.ENFORCE_UID_CONFORMANCE
        self.default_aet_length = _config.USE_SHORT_DIMSE_AET
    def teardown(self):
        # Restore the config snapshot taken in setup().
        _config.ENFORCE_UID_CONFORMANCE = self.default_conformance
        _config.USE_SHORT_DIMSE_AET = self.default_aet_length
    def test_assignment(self):
        """ Check assignment works correctly """
        primitive = C_STORE()
        primitive.MessageID = 11
        assert primitive.MessageID == 11
        primitive.MessageIDBeingRespondedTo = 13
        assert primitive.MessageIDBeingRespondedTo == 13
        # AffectedSOPClassUID: str, UID and bytes inputs all coerce to UID.
        primitive.AffectedSOPClassUID = '1.1.1'
        assert primitive.AffectedSOPClassUID == UID('1.1.1')
        assert isinstance(primitive.AffectedSOPClassUID, UID)
        primitive.AffectedSOPClassUID = UID('1.1.2')
        assert primitive.AffectedSOPClassUID == UID('1.1.2')
        assert isinstance(primitive.AffectedSOPClassUID, UID)
        primitive.AffectedSOPClassUID = b'1.1.3'
        assert primitive.AffectedSOPClassUID == UID('1.1.3')
        assert isinstance(primitive.AffectedSOPClassUID, UID)
        # AffectedSOPInstanceUID: same coercion behaviour as the class UID.
        primitive.AffectedSOPInstanceUID = b'1.2.1'
        assert primitive.AffectedSOPInstanceUID == UID('1.2.1')
        assert isinstance(primitive.AffectedSOPClassUID, UID)
        primitive.AffectedSOPInstanceUID = UID('1.2.2')
        assert primitive.AffectedSOPInstanceUID == UID('1.2.2')
        assert isinstance(primitive.AffectedSOPClassUID, UID)
        primitive.AffectedSOPInstanceUID = '1.2.3'
        assert primitive.AffectedSOPInstanceUID == UID('1.2.3')
        assert isinstance(primitive.AffectedSOPClassUID, UID)
        primitive.Priority = 0x02
        assert primitive.Priority == 0x02
        # Move originator AE title: str is normalised to bytes; blank values
        # (empty or whitespace-only, str or bytes) are stored as None.
        primitive.MoveOriginatorApplicationEntityTitle = 'UNITTEST_SCP'
        assert primitive.MoveOriginatorApplicationEntityTitle == b'UNITTEST_SCP'
        primitive.MoveOriginatorApplicationEntityTitle = b'UNITTEST_SCP'
        assert primitive.MoveOriginatorApplicationEntityTitle == b'UNITTEST_SCP'
        primitive.MoveOriginatorApplicationEntityTitle = ''
        assert primitive.MoveOriginatorApplicationEntityTitle is None
        primitive.MoveOriginatorApplicationEntityTitle = b''
        assert primitive.MoveOriginatorApplicationEntityTitle is None
        primitive.MoveOriginatorApplicationEntityTitle = ' '
        assert primitive.MoveOriginatorApplicationEntityTitle is None
        primitive.MoveOriginatorApplicationEntityTitle = b' '
        assert primitive.MoveOriginatorApplicationEntityTitle is None
        primitive.MoveOriginatorMessageID = 15
        assert primitive.MoveOriginatorMessageID == 15
        ref_ds = Dataset()
        ref_ds.PatientID = 1234567
        primitive.DataSet = BytesIO(encode(ref_ds, True, True))
        # DataSet round-trip equality is not asserted here; only that
        # assignment of an encoded BytesIO is accepted.
        primitive.Status = 0x0000
        assert primitive.Status == 0x0000
        primitive.Status = 0xC123
        assert primitive.Status == 0xC123
        primitive.Status = 0xEE01
        assert primitive.Status == 0xEE01
    def test_uid_exceptions_false(self):
        """Test ValueError raised with ENFORCE_UID_CONFORMANCE = False."""
        primitive = C_STORE()
        _config.ENFORCE_UID_CONFORMANCE = False
        # Non-conformant UIDs are accepted when enforcement is off...
        primitive.AffectedSOPClassUID = 'abc'
        assert primitive.AffectedSOPClassUID == 'abc'
        primitive.AffectedSOPInstanceUID = 'abc'
        assert primitive.AffectedSOPInstanceUID == 'abc'
        # ...but the 64-character UID length limit is still enforced.
        with pytest.raises(ValueError):
            primitive.AffectedSOPClassUID = 'abc' * 22
        with pytest.raises(ValueError):
            primitive.AffectedSOPInstanceUID = 'abc' * 22
    def test_uid_exceptions_true(self):
        """Test ValueError raised with ENFORCE_UID_CONFORMANCE = True."""
        primitive = C_STORE()
        _config.ENFORCE_UID_CONFORMANCE = True
        # With enforcement on, non-conformant UIDs are rejected immediately.
        with pytest.raises(ValueError):
            primitive.AffectedSOPClassUID = 'abc'
        with pytest.raises(ValueError):
            primitive.AffectedSOPInstanceUID = 'abc'
    def test_exceptions(self):
        """ Check incorrect types/values for properties raise exceptions """
        primitive = C_STORE()
        # MessageID: must be an int in the 0-65535 range.
        with pytest.raises(TypeError):
            primitive.MessageID = 'halp'
        with pytest.raises(TypeError):
            primitive.MessageID = 1.111
        with pytest.raises(ValueError):
            primitive.MessageID = 65536
        with pytest.raises(ValueError):
            primitive.MessageID = -1
        # MessageIDBeingRespondedTo: same constraints as MessageID.
        with pytest.raises(TypeError):
            primitive.MessageIDBeingRespondedTo = 'halp'
        with pytest.raises(TypeError):
            primitive.MessageIDBeingRespondedTo = 1.111
        with pytest.raises(ValueError):
            primitive.MessageIDBeingRespondedTo = 65536
        with pytest.raises(ValueError):
            primitive.MessageIDBeingRespondedTo = -1
        # AffectedSOPClassUID: non-string types rejected.
        with pytest.raises(TypeError):
            primitive.AffectedSOPClassUID = 45.2
        with pytest.raises(TypeError):
            primitive.AffectedSOPClassUID = 100
        # AffectedSOPInstanceUID: non-string types rejected.
        with pytest.raises(TypeError):
            primitive.AffectedSOPInstanceUID = 45.2
        with pytest.raises(TypeError):
            primitive.AffectedSOPInstanceUID = 100
        # Priority: only the defined priority codes (0, 1, 2) are valid.
        with pytest.raises(ValueError):
            primitive.Priority = 45.2
        with pytest.raises(ValueError):
            primitive.Priority = 'abc'
        with pytest.raises(ValueError):
            primitive.Priority = -1
        with pytest.raises(ValueError):
            primitive.Priority = 3
        # MoveOriginatorApplicationEntityTitle: numeric types rejected,
        # but an empty string is normalised to None rather than raising.
        with pytest.raises(TypeError):
            primitive.MoveOriginatorApplicationEntityTitle = 45.2
        with pytest.raises(TypeError):
            primitive.MoveOriginatorApplicationEntityTitle = 100
        primitive.MoveOriginatorApplicationEntityTitle = ''
        assert primitive.MoveOriginatorApplicationEntityTitle is None
        # MoveOriginatorMessageID: same constraints as MessageID.
        with pytest.raises(TypeError):
            primitive.MoveOriginatorMessageID = 'halp'
        with pytest.raises(TypeError):
            primitive.MoveOriginatorMessageID = 1.111
        with pytest.raises(ValueError):
            primitive.MoveOriginatorMessageID = 65536
        with pytest.raises(ValueError):
            primitive.MoveOriginatorMessageID = -1
        # DataSet: only BytesIO is accepted, with a specific error message.
        msg = r"'DataSet' parameter must be a BytesIO object"
        with pytest.raises(TypeError, match=msg):
            primitive.DataSet = 'halp'
        with pytest.raises(TypeError):
            primitive.DataSet = 1.111
        with pytest.raises(TypeError):
            primitive.DataSet = 50
        with pytest.raises(TypeError):
            primitive.DataSet = [30, 10]
        # Status: must be an int.
        with pytest.raises(TypeError):
            primitive.Status = 19.4
    def test_conversion_rq(self):
        """ Check conversion to a -RQ PDU produces the correct output """
        primitive = C_STORE()
        primitive.MessageID = 7
        primitive.AffectedSOPClassUID = '1.2.840.10008.5.1.4.1.1.2'
        primitive.AffectedSOPInstanceUID = '1.2.392.200036.9116.2.6.1.48.' \
                                           '1215709044.1459316254.522441'
        primitive.Priority = 0x02
        primitive.MoveOriginatorApplicationEntityTitle = 'UNITTEST_SCP'
        primitive.MoveOriginatorMessageID = 3
        ref_ds = Dataset()
        ref_ds.PatientID = 'Test1101'
        ref_ds.PatientName = "Tube HeNe"
        primitive.DataSet = BytesIO(encode(ref_ds, True, True))
        dimse_msg = C_STORE_RQ()
        dimse_msg.primitive_to_message(primitive)
        # Fragment the message; first PDV carries the command set, second
        # carries the data set. Compare both against the encoded references.
        pdvs = []
        for fragment in dimse_msg.encode_msg(1, 16382):
            pdvs.append(fragment)
        cs_pdv = pdvs[0].presentation_data_value_list[0][1]
        ds_pdv = pdvs[1].presentation_data_value_list[0][1]
        assert cs_pdv == c_store_rq_cmd_b
        assert ds_pdv == c_store_rq_ds_b
    def test_conversion_rsp(self):
        """ Check conversion to a -RSP PDU produces the correct output """
        primitive = C_STORE()
        primitive.MessageIDBeingRespondedTo = 5
        primitive.AffectedSOPClassUID = '1.2.4.10'
        primitive.AffectedSOPInstanceUID = '1.2.4.5.7.8'
        primitive.Status = 0x0000
        dimse_msg = C_STORE_RSP()
        dimse_msg.primitive_to_message(primitive)
        # A response carries only the command set PDV.
        pdvs = []
        for fragment in dimse_msg.encode_msg(1, 16382):
            pdvs.append(fragment)
        cs_pdv = pdvs[0].presentation_data_value_list[0][1]
        assert cs_pdv == c_store_rsp_cmd
    def test_is_valid_request(self):
        """Test C_STORE.is_valid_request"""
        # is_valid_request only flips True once every required element
        # (MessageID, class UID, priority, instance UID, data set) is set.
        primitive = C_STORE()
        assert not primitive.is_valid_request
        primitive.MessageID = 1
        assert not primitive.is_valid_request
        primitive.AffectedSOPClassUID = '1.2'
        assert not primitive.is_valid_request
        primitive.Priority = 2
        assert not primitive.is_valid_request
        primitive.AffectedSOPInstanceUID = '1.2.1'
        assert not primitive.is_valid_request
        primitive.DataSet = BytesIO()
        assert primitive.is_valid_request
    def test_is_valid_resposne(self):
        # NOTE(review): method name has a typo ('resposne'); left as-is so the
        # collected test ID does not change.
        """Test C_STORE.is_valid_response."""
        primitive = C_STORE()
        assert not primitive.is_valid_response
        primitive.MessageIDBeingRespondedTo = 1
        assert not primitive.is_valid_response
        primitive.Status = 0x0000
        assert primitive.is_valid_response
    def test_aet_short_false(self):
        """Test using long AE titles."""
        # With USE_SHORT_DIMSE_AET off the AE title is padded to 16 bytes.
        primitive = C_STORE()
        _config.USE_SHORT_DIMSE_AET = False
        primitive.MoveOriginatorApplicationEntityTitle = b'A'
        aet = primitive.MoveOriginatorApplicationEntityTitle
        assert b'A               ' == aet
    def test_aet_short_true(self):
        """Test using short AE titles."""
        # With USE_SHORT_DIMSE_AET on, no padding is added and titles longer
        # than 16 bytes are truncated to 16.
        primitive = C_STORE()
        _config.USE_SHORT_DIMSE_AET = True
        primitive.MoveOriginatorApplicationEntityTitle = b'A'
        aet = primitive.MoveOriginatorApplicationEntityTitle
        assert b'A' == aet
        primitive.MoveOriginatorApplicationEntityTitle = b'ABCDEFGHIJKLMNO'
        aet = primitive.MoveOriginatorApplicationEntityTitle
        assert b'ABCDEFGHIJKLMNO' == aet
        primitive.MoveOriginatorApplicationEntityTitle = b'ABCDEFGHIJKLMNOP'
        aet = primitive.MoveOriginatorApplicationEntityTitle
        assert b'ABCDEFGHIJKLMNOP' == aet
        primitive.MoveOriginatorApplicationEntityTitle = b'ABCDEFGHIJKLMNOPQ'
        aet = primitive.MoveOriginatorApplicationEntityTitle
        assert b'ABCDEFGHIJKLMNOP' == aet
class TestPrimitive_C_FIND(object):
"""Test DIMSE C-FIND operations."""
def setup(self):
self.default_conformance = _config.ENFORCE_UID_CONFORMANCE
def teardown(self):
_config.ENFORCE_UID_CONFORMANCE = self.default_conformance
def test_assignment(self):
""" Check assignment works correctly """
primitive = C_FIND()
primitive.MessageID = 11
assert primitive.MessageID == 11
primitive.MessageIDBeingRespondedTo = 13
assert primitive.MessageIDBeingRespondedTo == 13
# AffectedSOPClassUID
primitive.AffectedSOPClassUID = '1.1.1'
assert primitive.AffectedSOPClassUID == UID('1.1.1')
assert isinstance(primitive.AffectedSOPClassUID, UID)
primitive.AffectedSOPClassUID = UID('1.1.2')
assert primitive.AffectedSOPClassUID == UID('1.1.2')
assert isinstance(primitive.AffectedSOPClassUID, UID)
primitive.AffectedSOPClassUID = b'1.1.3'
assert primitive.AffectedSOPClassUID == UID('1.1.3')
assert isinstance(primitive.AffectedSOPClassUID, UID)
primitive.Priority = 0x02
assert primitive.Priority == 0x02
ref_ds = Dataset()
ref_ds.PatientID = '*'
ref_ds.QueryRetrieveLevel = "PATIENT"
primitive.Identifier = BytesIO(encode(ref_ds, True, True))
#assert primitive.DataSet, ref_ds)
primitive.Status = 0x0000
assert primitive.Status == 0x0000
primitive.Status = 0xC123
assert primitive.Status == 0xC123
primitive.Status = 0xEE01
assert primitive.Status == 0xEE01
def test_uid_exceptions_false(self):
"""Test ValueError raised with ENFORCE_UID_CONFORMANCE = False."""
primitive = C_FIND()
_config.ENFORCE_UID_CONFORMANCE = False
primitive.AffectedSOPClassUID = 'abc'
assert primitive.AffectedSOPClassUID == 'abc'
# Can't have more than 64 characters
with pytest.raises(ValueError):
primitive.AffectedSOPClassUID = 'abc' * 22
def test_uid_exceptions_true(self):
"""Test ValueError raised with ENFORCE_UID_CONFORMANCE = True."""
primitive = C_FIND()
_config.ENFORCE_UID_CONFORMANCE = True
with pytest.raises(ValueError):
primitive.AffectedSOPClassUID = 'abc'
def test_exceptions(self):
""" Check incorrect types/values for properties raise exceptions """
primitive = C_FIND()
# MessageID
with pytest.raises(TypeError):
primitive.MessageID = 'halp'
with pytest.raises(TypeError):
primitive.MessageID = 1.111
with pytest.raises(ValueError):
primitive.MessageID = 65536
with pytest.raises(ValueError):
primitive.MessageID = -1
# MessageIDBeingRespondedTo
with pytest.raises(TypeError):
primitive.MessageIDBeingRespondedTo = 'halp'
with pytest.raises(TypeError):
primitive.MessageIDBeingRespondedTo = 1.111
with pytest.raises(ValueError):
primitive.MessageIDBeingRespondedTo = 65536
with pytest.raises(ValueError):
primitive.MessageIDBeingRespondedTo = -1
# AffectedSOPClassUID
with pytest.raises(TypeError):
primitive.AffectedSOPClassUID = 45.2
with pytest.raises(TypeError):
primitive.AffectedSOPClassUID = 100
# Priority
with pytest.raises(ValueError):
primitive.Priority = 45.2
with pytest.raises(ValueError):
primitive.Priority = 'abc'
with pytest.raises(ValueError):
primitive.Priority = -1
with pytest.raises(ValueError):
primitive.Priority = 3
# Identifier
msg = r"'Identifier' parameter must be a BytesIO object"
with pytest.raises(TypeError, match=msg):
primitive.Identifier = 'halp'
with pytest.raises(TypeError):
primitive.Identifier = 1.111
with pytest.raises(TypeError):
primitive.Identifier = 50
with pytest.raises(TypeError):
primitive.Identifier = [30, 10]
# Status
with pytest.raises(TypeError):
primitive.Status = 19.4
def test_conversion_rq(self):
""" Check conversion to a -RQ PDU produces the correct output """
primitive = C_FIND()
primitive.MessageID = 7
primitive.AffectedSOPClassUID = '1.2.840.10008.5.1.4.1.1.2'
primitive.Priority = 0x02
ref_identifier = Dataset()
ref_identifier.PatientID = '*'
ref_identifier.QueryRetrieveLevel = "PATIENT"
primitive.Identifier = BytesIO(encode(ref_identifier, True, True))
dimse_msg = C_FIND_RQ()
dimse_msg.primitive_to_message(primitive)
pdvs = []
for fragment in dimse_msg.encode_msg(1, 16382):
pdvs.append(fragment)
cs_pdv = pdvs[0].presentation_data_value_list[0][1]
ds_pdv = pdvs[1].presentation_data_value_list[0][1]
assert cs_pdv == c_find_rq_cmd
assert ds_pdv == c_find_rq_ds
def test_conversion_rsp(self):
    """Check conversion of a C-FIND primitive to a -RSP PDU is correct."""
    primitive = C_FIND()
    primitive.MessageIDBeingRespondedTo = 5
    primitive.AffectedSOPClassUID = '1.2.840.10008.5.1.4.1.1.2'
    primitive.Status = 0xFF00
    identifier = Dataset()
    identifier.QueryRetrieveLevel = "PATIENT"
    identifier.RetrieveAETitle = validate_ae_title("FINDSCP")
    identifier.PatientName = "ANON^A^B^C^D"
    primitive.Identifier = BytesIO(encode(identifier, True, True))

    msg = C_FIND_RSP()
    msg.primitive_to_message(primitive)
    # First PDV is the command set, second is the identifier dataset.
    fragments = list(msg.encode_msg(1, 16382))
    assert fragments[0].presentation_data_value_list[0][1] == c_find_rsp_cmd
    assert fragments[1].presentation_data_value_list[0][1] == c_find_rsp_ds
def test_is_valid_request(self):
    """Test C_FIND.is_valid_request"""
    req = C_FIND()
    # The request only becomes valid once every required parameter
    # (message ID, SOP class, priority, identifier) has been set.
    assert not req.is_valid_request
    req.MessageID = 1
    assert not req.is_valid_request
    req.AffectedSOPClassUID = '1.2'
    assert not req.is_valid_request
    req.Priority = 2
    assert not req.is_valid_request
    req.Identifier = BytesIO()
    assert req.is_valid_request
def test_is_valid_resposne(self):
    """Test C_FIND.is_valid_response."""
    # NOTE(review): method name typo'd ('resposne'); kept so the
    # collected test ID is stable.
    rsp = C_FIND()
    # Valid only after both the responded-to ID and the status are set.
    assert not rsp.is_valid_response
    rsp.MessageIDBeingRespondedTo = 1
    assert not rsp.is_valid_response
    rsp.Status = 0x0000
    assert rsp.is_valid_response
class TestPrimitive_C_GET(object):
    """Test DIMSE C-GET operations."""
    def setup(self):
        # Remember the global config value so teardown can restore it after
        # tests that mutate ENFORCE_UID_CONFORMANCE.
        self.default_conformance = _config.ENFORCE_UID_CONFORMANCE

    def teardown(self):
        _config.ENFORCE_UID_CONFORMANCE = self.default_conformance

    def test_assignment(self):
        """ Check assignment works correctly """
        primitive = C_GET()
        primitive.MessageID = 11
        assert primitive.MessageID == 11
        primitive.MessageIDBeingRespondedTo = 13
        assert primitive.MessageIDBeingRespondedTo == 13
        # AffectedSOPClassUID: str, UID and bytes are all converted to UID
        primitive.AffectedSOPClassUID = '1.1.1'
        assert primitive.AffectedSOPClassUID == UID('1.1.1')
        assert isinstance(primitive.AffectedSOPClassUID, UID)
        primitive.AffectedSOPClassUID = UID('1.1.2')
        assert primitive.AffectedSOPClassUID == UID('1.1.2')
        assert isinstance(primitive.AffectedSOPClassUID, UID)
        primitive.AffectedSOPClassUID = b'1.1.3'
        assert primitive.AffectedSOPClassUID == UID('1.1.3')
        assert isinstance(primitive.AffectedSOPClassUID, UID)
        primitive.Priority = 0x02
        assert primitive.Priority == 0x02
        # Identifier is stored as encoded bytes, not as a Dataset
        ref_ds = Dataset()
        ref_ds.PatientID = 1234567
        primitive.Identifier = BytesIO(encode(ref_ds, True, True))
        # Status accepts known and unknown codes alike
        primitive.Status = 0x0000
        assert primitive.Status == 0x0000
        primitive.Status = 0xC123
        assert primitive.Status == 0xC123
        primitive.Status = 0xEE01
        assert primitive.Status == 0xEE01
        # Sub-operation counters
        primitive.NumberOfRemainingSuboperations = 1
        assert primitive.NumberOfRemainingSuboperations == 1
        primitive.NumberOfCompletedSuboperations = 2
        assert primitive.NumberOfCompletedSuboperations == 2
        primitive.NumberOfFailedSuboperations = 3
        assert primitive.NumberOfFailedSuboperations == 3
        primitive.NumberOfWarningSuboperations = 4
        assert primitive.NumberOfWarningSuboperations == 4

    def test_uid_exceptions_false(self):
        """Test ValueError raised with ENFORCE_UID_CONFORMANCE = False."""
        primitive = C_GET()
        _config.ENFORCE_UID_CONFORMANCE = False
        # Non-conformant UIDs are accepted when enforcement is off
        primitive.AffectedSOPClassUID = 'abc'
        assert primitive.AffectedSOPClassUID == 'abc'
        # Can't have more than 64 characters
        with pytest.raises(ValueError):
            primitive.AffectedSOPClassUID = 'abc' * 22

    def test_uid_exceptions_true(self):
        """Test ValueError raised with ENFORCE_UID_CONFORMANCE = True."""
        primitive = C_GET()
        _config.ENFORCE_UID_CONFORMANCE = True
        with pytest.raises(ValueError):
            primitive.AffectedSOPClassUID = 'abc'

    def test_exceptions(self):
        """ Check incorrect types/values for properties raise exceptions """
        primitive = C_GET()
        # MessageID: non-int -> TypeError, outside 0..65535 -> ValueError
        with pytest.raises(TypeError):
            primitive.MessageID = 'halp'
        with pytest.raises(TypeError):
            primitive.MessageID = 1.111
        with pytest.raises(ValueError):
            primitive.MessageID = 65536
        with pytest.raises(ValueError):
            primitive.MessageID = -1
        # MessageIDBeingRespondedTo: same constraints as MessageID
        with pytest.raises(TypeError):
            primitive.MessageIDBeingRespondedTo = 'halp'
        with pytest.raises(TypeError):
            primitive.MessageIDBeingRespondedTo = 1.111
        with pytest.raises(ValueError):
            primitive.MessageIDBeingRespondedTo = 65536
        with pytest.raises(ValueError):
            primitive.MessageIDBeingRespondedTo = -1
        # NumberOfRemainingSuboperations: non-int -> TypeError,
        # negative -> ValueError
        with pytest.raises(TypeError):
            primitive.NumberOfRemainingSuboperations = 'halp'
        with pytest.raises(TypeError):
            primitive.NumberOfRemainingSuboperations = 1.111
        with pytest.raises(ValueError):
            primitive.NumberOfRemainingSuboperations = -1
        # NumberOfCompletedSuboperations
        with pytest.raises(TypeError):
            primitive.NumberOfCompletedSuboperations = 'halp'
        with pytest.raises(TypeError):
            primitive.NumberOfCompletedSuboperations = 1.111
        with pytest.raises(ValueError):
            primitive.NumberOfCompletedSuboperations = -1
        # NumberOfFailedSuboperations
        with pytest.raises(TypeError):
            primitive.NumberOfFailedSuboperations = 'halp'
        with pytest.raises(TypeError):
            primitive.NumberOfFailedSuboperations = 1.111
        with pytest.raises(ValueError):
            primitive.NumberOfFailedSuboperations = -1
        # NumberOfWarningSuboperations
        with pytest.raises(TypeError):
            primitive.NumberOfWarningSuboperations = 'halp'
        with pytest.raises(TypeError):
            primitive.NumberOfWarningSuboperations = 1.111
        with pytest.raises(ValueError):
            primitive.NumberOfWarningSuboperations = -1
        # AffectedSOPClassUID: numeric types rejected
        with pytest.raises(TypeError):
            primitive.AffectedSOPClassUID = 45.2
        with pytest.raises(TypeError):
            primitive.AffectedSOPClassUID = 100
        # Priority: 0x02 is valid (see test_assignment); these are not
        with pytest.raises(ValueError):
            primitive.Priority = 45.2
        with pytest.raises(ValueError):
            primitive.Priority = 'abc'
        with pytest.raises(ValueError):
            primitive.Priority = -1
        with pytest.raises(ValueError):
            primitive.Priority = 3
        # Identifier: must be a BytesIO object
        msg = r"'Identifier' parameter must be a BytesIO object"
        with pytest.raises(TypeError, match=msg):
            primitive.Identifier = 'halp'
        with pytest.raises(TypeError):
            primitive.Identifier = 1.111
        with pytest.raises(TypeError):
            primitive.Identifier = 50
        with pytest.raises(TypeError):
            primitive.Identifier = [30, 10]
        # Status: non-int rejected
        with pytest.raises(TypeError):
            primitive.Status = 19.4

    def test_conversion_rq(self):
        """ Check conversion to a -RQ PDU produces the correct output """
        primitive = C_GET()
        primitive.MessageID = 7
        primitive.AffectedSOPClassUID = '1.2.840.10008.5.1.4.1.1.2'
        primitive.Priority = 0x02
        ref_identifier = Dataset()
        ref_identifier.PatientID = '*'
        ref_identifier.QueryRetrieveLevel = "PATIENT"
        primitive.Identifier = BytesIO(encode(ref_identifier, True, True))
        dimse_msg = C_GET_RQ()
        dimse_msg.primitive_to_message(primitive)
        pdvs = []
        for fragment in dimse_msg.encode_msg(1, 16382):
            pdvs.append(fragment)
        # First fragment carries the command set, second the identifier
        cs_pdv = pdvs[0].presentation_data_value_list[0][1]
        ds_pdv = pdvs[1].presentation_data_value_list[0][1]
        assert cs_pdv == c_get_rq_cmd
        assert ds_pdv == c_get_rq_ds

    def test_conversion_rsp(self):
        """ Check conversion to a -RSP PDU produces the correct output """
        primitive = C_GET()
        primitive.MessageIDBeingRespondedTo = 5
        primitive.AffectedSOPClassUID = '1.2.840.10008.5.1.4.1.1.2'
        primitive.Status = 0xFF00
        primitive.NumberOfRemainingSuboperations = 3
        primitive.NumberOfCompletedSuboperations = 1
        primitive.NumberOfFailedSuboperations = 2
        primitive.NumberOfWarningSuboperations = 4
        ref_identifier = Dataset()
        ref_identifier.QueryRetrieveLevel = "PATIENT"
        ref_identifier.PatientID = "*"
        primitive.Identifier = BytesIO(encode(ref_identifier, True, True))
        dimse_msg = C_GET_RSP()
        dimse_msg.primitive_to_message(primitive)
        pdvs = []
        for fragment in dimse_msg.encode_msg(1, 16382):
            pdvs.append(fragment)
        # First fragment carries the command set, second the identifier
        cs_pdv = pdvs[0].presentation_data_value_list[0][1]
        ds_pdv = pdvs[1].presentation_data_value_list[0][1]
        assert cs_pdv == c_get_rsp_cmd
        assert ds_pdv == c_get_rsp_ds

    def test_is_valid_request(self):
        """Test C_GET.is_valid_request"""
        primitive = C_GET()
        # Valid only once all required request parameters are set
        assert not primitive.is_valid_request
        primitive.MessageID = 1
        assert not primitive.is_valid_request
        primitive.AffectedSOPClassUID = '1.2'
        assert not primitive.is_valid_request
        primitive.Priority = 2
        assert not primitive.is_valid_request
        primitive.Identifier = BytesIO()
        assert primitive.is_valid_request

    def test_is_valid_resposne(self):
        """Test C_GET.is_valid_response."""
        # NOTE(review): method name typo'd ('resposne'); kept for stability
        primitive = C_GET()
        assert not primitive.is_valid_response
        primitive.MessageIDBeingRespondedTo = 1
        assert not primitive.is_valid_response
        primitive.Status = 0x0000
        assert primitive.is_valid_response
class TestPrimitive_C_MOVE(object):
    """Test DIMSE C-MOVE operations."""
    def setup(self):
        # Remember global config values so teardown can restore them after
        # tests that mutate ENFORCE_UID_CONFORMANCE / USE_SHORT_DIMSE_AET.
        self.default_conformance = _config.ENFORCE_UID_CONFORMANCE
        self.default_aet_length = _config.USE_SHORT_DIMSE_AET

    def teardown(self):
        _config.ENFORCE_UID_CONFORMANCE = self.default_conformance
        _config.USE_SHORT_DIMSE_AET = self.default_aet_length

    def test_assignment(self):
        """ Check assignment works correctly """
        primitive = C_MOVE()
        primitive.MessageID = 11
        assert primitive.MessageID == 11
        primitive.MessageIDBeingRespondedTo = 13
        assert primitive.MessageIDBeingRespondedTo == 13
        # AffectedSOPClassUID: str, UID and bytes are all converted to UID
        primitive.AffectedSOPClassUID = '1.1.1'
        assert primitive.AffectedSOPClassUID == UID('1.1.1')
        assert isinstance(primitive.AffectedSOPClassUID, UID)
        primitive.AffectedSOPClassUID = UID('1.1.2')
        assert primitive.AffectedSOPClassUID == UID('1.1.2')
        assert isinstance(primitive.AffectedSOPClassUID, UID)
        primitive.AffectedSOPClassUID = b'1.1.3'
        assert primitive.AffectedSOPClassUID == UID('1.1.3')
        assert isinstance(primitive.AffectedSOPClassUID, UID)
        primitive.Priority = 0x02
        assert primitive.Priority == 0x02
        # MoveDestination: str input comes back as bytes
        primitive.MoveDestination = 'UNITTEST_SCP'
        assert primitive.MoveDestination == b'UNITTEST_SCP'
        # Identifier is stored as encoded bytes, not as a Dataset
        ref_ds = Dataset()
        ref_ds.PatientID = 1234567
        primitive.Identifier = BytesIO(encode(ref_ds, True, True))
        # Status accepts known and unknown codes alike
        primitive.Status = 0x0000
        assert primitive.Status == 0x0000
        primitive.Status = 0xC123
        assert primitive.Status == 0xC123
        primitive.Status = 0xEE01
        assert primitive.Status == 0xEE01

    def test_uid_exceptions_false(self):
        """Test ValueError raised with ENFORCE_UID_CONFORMANCE = False."""
        primitive = C_MOVE()
        _config.ENFORCE_UID_CONFORMANCE = False
        # Non-conformant UIDs are accepted when enforcement is off
        primitive.AffectedSOPClassUID = 'abc'
        assert primitive.AffectedSOPClassUID == 'abc'
        # Can't have more than 64 characters
        with pytest.raises(ValueError):
            primitive.AffectedSOPClassUID = 'abc' * 22

    def test_uid_exceptions_true(self):
        """Test ValueError raised with ENFORCE_UID_CONFORMANCE = True."""
        primitive = C_MOVE()
        _config.ENFORCE_UID_CONFORMANCE = True
        with pytest.raises(ValueError):
            primitive.AffectedSOPClassUID = 'abc'

    def test_exceptions(self):
        """ Check incorrect types/values for properties raise exceptions """
        primitive = C_MOVE()
        # MessageID: non-int -> TypeError, outside 0..65535 -> ValueError
        with pytest.raises(TypeError):
            primitive.MessageID = 'halp'
        with pytest.raises(TypeError):
            primitive.MessageID = 1.111
        with pytest.raises(ValueError):
            primitive.MessageID = 65536
        with pytest.raises(ValueError):
            primitive.MessageID = -1
        # MessageIDBeingRespondedTo: same constraints as MessageID
        with pytest.raises(TypeError):
            primitive.MessageIDBeingRespondedTo = 'halp'
        with pytest.raises(TypeError):
            primitive.MessageIDBeingRespondedTo = 1.111
        with pytest.raises(ValueError):
            primitive.MessageIDBeingRespondedTo = 65536
        with pytest.raises(ValueError):
            primitive.MessageIDBeingRespondedTo = -1
        # NumberOfRemainingSuboperations: non-int -> TypeError,
        # negative -> ValueError
        with pytest.raises(TypeError):
            primitive.NumberOfRemainingSuboperations = 'halp'
        with pytest.raises(TypeError):
            primitive.NumberOfRemainingSuboperations = 1.111
        with pytest.raises(ValueError):
            primitive.NumberOfRemainingSuboperations = -1
        # NumberOfCompletedSuboperations
        with pytest.raises(TypeError):
            primitive.NumberOfCompletedSuboperations = 'halp'
        with pytest.raises(TypeError):
            primitive.NumberOfCompletedSuboperations = 1.111
        with pytest.raises(ValueError):
            primitive.NumberOfCompletedSuboperations = -1
        # NumberOfFailedSuboperations
        with pytest.raises(TypeError):
            primitive.NumberOfFailedSuboperations = 'halp'
        with pytest.raises(TypeError):
            primitive.NumberOfFailedSuboperations = 1.111
        with pytest.raises(ValueError):
            primitive.NumberOfFailedSuboperations = -1
        # NumberOfWarningSuboperations
        with pytest.raises(TypeError):
            primitive.NumberOfWarningSuboperations = 'halp'
        with pytest.raises(TypeError):
            primitive.NumberOfWarningSuboperations = 1.111
        with pytest.raises(ValueError):
            primitive.NumberOfWarningSuboperations = -1
        # AffectedSOPClassUID: numeric types rejected
        with pytest.raises(TypeError):
            primitive.AffectedSOPClassUID = 45.2
        with pytest.raises(TypeError):
            primitive.AffectedSOPClassUID = 100
        # Priority: 0x02 is valid (see test_assignment); these are not
        with pytest.raises(ValueError):
            primitive.Priority = 45.2
        with pytest.raises(ValueError):
            primitive.Priority = 'abc'
        with pytest.raises(ValueError):
            primitive.Priority = -1
        with pytest.raises(ValueError):
            primitive.Priority = 3
        # MoveDestination: non-str/bytes -> TypeError, empty/blank -> ValueError
        with pytest.raises(TypeError):
            primitive.MoveDestination = 45.2
        with pytest.raises(TypeError):
            primitive.MoveDestination = 100
        with pytest.raises(ValueError):
            primitive.MoveDestination = ''
        with pytest.raises(ValueError):
            primitive.MoveDestination = ' '
        # Identifier: must be a BytesIO object
        msg = r"'Identifier' parameter must be a BytesIO object"
        with pytest.raises(TypeError, match=msg):
            primitive.Identifier = 'halp'
        with pytest.raises(TypeError):
            primitive.Identifier = 1.111
        with pytest.raises(TypeError):
            primitive.Identifier = 50
        with pytest.raises(TypeError):
            primitive.Identifier = [30, 10]
        # Status: non-int rejected
        with pytest.raises(TypeError):
            primitive.Status = 19.4

    def test_conversion_rq(self):
        """ Check conversion to a -RQ PDU produces the correct output """
        primitive = C_MOVE()
        primitive.MessageID = 7
        primitive.AffectedSOPClassUID = '1.2.840.10008.5.1.4.1.1.2'
        primitive.Priority = 0x02
        primitive.MoveDestination = validate_ae_title("MOVE_SCP")
        ref_identifier = Dataset()
        ref_identifier.PatientID = '*'
        ref_identifier.QueryRetrieveLevel = "PATIENT"
        primitive.Identifier = BytesIO(encode(ref_identifier, True, True))
        dimse_msg = C_MOVE_RQ()
        dimse_msg.primitive_to_message(primitive)
        pdvs = []
        for fragment in dimse_msg.encode_msg(1, 16382):
            pdvs.append(fragment)
        # First fragment carries the command set, second the identifier
        cs_pdv = pdvs[0].presentation_data_value_list[0][1]
        ds_pdv = pdvs[1].presentation_data_value_list[0][1]
        assert cs_pdv == c_move_rq_cmd
        assert ds_pdv == c_move_rq_ds

    def test_conversion_rsp(self):
        """ Check conversion to a -RSP PDU produces the correct output """
        primitive = C_MOVE()
        primitive.MessageIDBeingRespondedTo = 5
        primitive.AffectedSOPClassUID = '1.2.840.10008.5.1.4.1.1.2'
        primitive.Status = 0xFF00
        primitive.NumberOfRemainingSuboperations = 3
        primitive.NumberOfCompletedSuboperations = 1
        primitive.NumberOfFailedSuboperations = 2
        primitive.NumberOfWarningSuboperations = 4
        ref_identifier = Dataset()
        ref_identifier.QueryRetrieveLevel = "PATIENT"
        ref_identifier.PatientID = "*"
        primitive.Identifier = BytesIO(encode(ref_identifier, True, True))
        dimse_msg = C_MOVE_RSP()
        dimse_msg.primitive_to_message(primitive)
        pdvs = []
        for fragment in dimse_msg.encode_msg(1, 16382):
            pdvs.append(fragment)
        # First fragment carries the command set, second the identifier
        cs_pdv = pdvs[0].presentation_data_value_list[0][1]
        ds_pdv = pdvs[1].presentation_data_value_list[0][1]
        assert cs_pdv == c_move_rsp_cmd
        assert ds_pdv == c_move_rsp_ds

    def test_is_valid_request(self):
        """Test C_MOVE.is_valid_request"""
        primitive = C_MOVE()
        # Valid only once all required request parameters are set,
        # including the MoveDestination AE title
        assert not primitive.is_valid_request
        primitive.MessageID = 1
        assert not primitive.is_valid_request
        primitive.AffectedSOPClassUID = '1.2'
        assert not primitive.is_valid_request
        primitive.Priority = 2
        assert not primitive.is_valid_request
        primitive.MoveDestination = b'1234567890123456'
        assert not primitive.is_valid_request
        primitive.Identifier = BytesIO()
        assert primitive.is_valid_request

    def test_is_valid_resposne(self):
        """Test C_MOVE.is_valid_response."""
        # NOTE(review): method name typo'd ('resposne'); kept for stability
        primitive = C_MOVE()
        assert not primitive.is_valid_response
        primitive.MessageIDBeingRespondedTo = 1
        assert not primitive.is_valid_response
        primitive.Status = 0x0000
        assert primitive.is_valid_response

    def test_aet_short_false(self):
        """Test using long AE titles."""
        primitive = C_MOVE()
        _config.USE_SHORT_DIMSE_AET = False
        # With short AETs disabled, the title is padded to 16 characters
        primitive.MoveDestination = b'A'
        assert b'A               ' == primitive.MoveDestination

    def test_aet_short_true(self):
        """Test using short AE titles."""
        primitive = C_MOVE()
        _config.USE_SHORT_DIMSE_AET = True
        # With short AETs enabled, no padding is applied...
        primitive.MoveDestination = b'A'
        aet = primitive.MoveDestination
        assert b'A' == primitive.MoveDestination
        primitive.MoveDestination = b'ABCDEFGHIJKLMNO'
        assert b'ABCDEFGHIJKLMNO' == primitive.MoveDestination
        primitive.MoveDestination = b'ABCDEFGHIJKLMNOP'
        assert b'ABCDEFGHIJKLMNOP' == primitive.MoveDestination
        # ...but titles are still truncated to the 16 character maximum
        primitive.MoveDestination = b'ABCDEFGHIJKLMNOPQ'
        assert b'ABCDEFGHIJKLMNOP' == primitive.MoveDestination
class TestPrimitive_C_ECHO(object):
    """Test DIMSE C-ECHO operations."""
    def setup(self):
        # Remember the global config value so teardown can restore it after
        # tests that mutate ENFORCE_UID_CONFORMANCE.
        self.default_conformance = _config.ENFORCE_UID_CONFORMANCE

    def teardown(self):
        _config.ENFORCE_UID_CONFORMANCE = self.default_conformance

    def test_assignment(self):
        """ Check assignment works correctly """
        primitive = C_ECHO()
        primitive.MessageID = 11
        assert primitive.MessageID == 11
        primitive.MessageIDBeingRespondedTo = 13
        assert primitive.MessageIDBeingRespondedTo == 13
        # AffectedSOPClassUID: str, UID and bytes are all converted to UID
        primitive.AffectedSOPClassUID = '1.1.1'
        assert primitive.AffectedSOPClassUID == UID('1.1.1')
        assert isinstance(primitive.AffectedSOPClassUID, UID)
        primitive.AffectedSOPClassUID = UID('1.1.2')
        assert primitive.AffectedSOPClassUID == UID('1.1.2')
        assert isinstance(primitive.AffectedSOPClassUID, UID)
        primitive.AffectedSOPClassUID = b'1.1.3'
        assert primitive.AffectedSOPClassUID == UID('1.1.3')
        assert isinstance(primitive.AffectedSOPClassUID, UID)
        # Known status
        primitive.Status = 0x0000
        assert primitive.Status == 0x0000
        # Unknown status codes are accepted as-is
        primitive.Status = 0x9999
        assert primitive.Status == 0x9999
        primitive.Status = 0xEE01
        assert primitive.Status == 0xEE01

    def test_uid_exceptions_false(self):
        """Test ValueError raised with ENFORCE_UID_CONFORMANCE = False."""
        primitive = C_ECHO()
        _config.ENFORCE_UID_CONFORMANCE = False
        # Non-conformant UIDs are accepted when enforcement is off
        primitive.AffectedSOPClassUID = 'abc'
        assert primitive.AffectedSOPClassUID == 'abc'
        # Can't have more than 64 characters
        with pytest.raises(ValueError):
            primitive.AffectedSOPClassUID = 'abc' * 22

    def test_uid_exceptions_true(self):
        """Test ValueError raised with ENFORCE_UID_CONFORMANCE = True."""
        primitive = C_ECHO()
        _config.ENFORCE_UID_CONFORMANCE = True
        with pytest.raises(ValueError):
            primitive.AffectedSOPClassUID = 'abc'

    def test_exceptions(self):
        """ Check incorrect types/values for properties raise exceptions """
        primitive = C_ECHO()
        # MessageID: non-int -> TypeError, outside 0..65535 -> ValueError
        with pytest.raises(TypeError):
            primitive.MessageID = 'halp'
        with pytest.raises(TypeError):
            primitive.MessageID = 1.111
        with pytest.raises(ValueError):
            primitive.MessageID = 65536
        with pytest.raises(ValueError):
            primitive.MessageID = -1
        # MessageIDBeingRespondedTo: same constraints as MessageID
        with pytest.raises(TypeError):
            primitive.MessageIDBeingRespondedTo = 'halp'
        with pytest.raises(TypeError):
            primitive.MessageIDBeingRespondedTo = 1.111
        with pytest.raises(ValueError):
            primitive.MessageIDBeingRespondedTo = 65536
        with pytest.raises(ValueError):
            primitive.MessageIDBeingRespondedTo = -1
        # AffectedSOPClassUID: numeric types rejected
        with pytest.raises(TypeError):
            primitive.AffectedSOPClassUID = 45.2
        with pytest.raises(TypeError):
            primitive.AffectedSOPClassUID = 100
        # Status: non-int rejected
        with pytest.raises(TypeError):
            primitive.Status = 19.4

    def test_conversion_rq(self):
        """ Check conversion to a -RQ PDU produces the correct output """
        primitive = C_ECHO()
        primitive.MessageID = 7
        primitive.AffectedSOPClassUID = '1.2.840.10008.1.1'
        dimse_msg = C_ECHO_RQ()
        dimse_msg.primitive_to_message(primitive)
        pdvs = []
        for fragment in dimse_msg.encode_msg(1, 16382):
            pdvs.append(fragment)
        # C-ECHO has no dataset, so only the command set fragment exists
        cs_pdv = pdvs[0].presentation_data_value_list[0][1]
        assert cs_pdv == c_echo_rq_cmd

    def test_conversion_rsp(self):
        """ Check conversion to a -RSP PDU produces the correct output """
        primitive = C_ECHO()
        primitive.MessageIDBeingRespondedTo = 8
        primitive.AffectedSOPClassUID = '1.2.840.10008.1.1'
        primitive.Status = 0x0000
        dimse_msg = C_ECHO_RSP()
        dimse_msg.primitive_to_message(primitive)
        pdvs = []
        for fragment in dimse_msg.encode_msg(1, 16382):
            pdvs.append(fragment)
        # C-ECHO has no dataset, so only the command set fragment exists
        cs_pdv = pdvs[0].presentation_data_value_list[0][1]
        assert cs_pdv == c_echo_rsp_cmd

    def test_is_valid_request(self):
        """Test C_ECHO.is_valid_request"""
        primitive = C_ECHO()
        # Valid once the message ID and SOP class are both set
        assert not primitive.is_valid_request
        primitive.MessageID = 1
        assert not primitive.is_valid_request
        primitive.AffectedSOPClassUID = '1.2'
        assert primitive.is_valid_request

    def test_is_valid_resposne(self):
        """Test C_ECHO.is_valid_response."""
        # NOTE(review): method name typo'd ('resposne'); kept for stability
        primitive = C_ECHO()
        assert not primitive.is_valid_response
        primitive.MessageIDBeingRespondedTo = 1
        assert not primitive.is_valid_response
        primitive.Status = 0x0000
        assert primitive.is_valid_response
| 34.554495
| 80
| 0.660228
| 4,373
| 43,435
| 6.383261
| 0.045049
| 0.049438
| 0.0791
| 0.068066
| 0.916995
| 0.891309
| 0.849287
| 0.807946
| 0.796661
| 0.790213
| 0
| 0.032836
| 0.258179
| 43,435
| 1,256
| 81
| 34.582006
| 0.833494
| 0.084471
| 0
| 0.857981
| 0
| 0
| 0.033203
| 0.005885
| 0
| 0
| 0.007305
| 0
| 0.178404
| 1
| 0.064554
| false
| 0
| 0.012911
| 0
| 0.084507
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1c8178503afda1727e4b61f1d76f36f3fe82fd81
| 5,445
|
py
|
Python
|
test/test_storage.py
|
lgray/pytorch_sparse
|
51a81777257b30c0afb2e06a1e7282af646bf876
|
[
"MIT"
] | 1
|
2020-06-29T19:15:59.000Z
|
2020-06-29T19:15:59.000Z
|
test/test_storage.py
|
lgray/pytorch_sparse
|
51a81777257b30c0afb2e06a1e7282af646bf876
|
[
"MIT"
] | null | null | null |
test/test_storage.py
|
lgray/pytorch_sparse
|
51a81777257b30c0afb2e06a1e7282af646bf876
|
[
"MIT"
] | null | null | null |
from itertools import product
import pytest
import torch
from torch_sparse.storage import SparseStorage
from .utils import dtypes, devices, tensor
@pytest.mark.parametrize('dtype,device', product(dtypes, devices))
def test_storage(dtype, device):
    """Basic construction: index-only and valued sparse storage."""
    # Index-only storage: no values, sparse sizes inferred from indices.
    row, col = tensor([[0, 0, 1, 1], [0, 1, 0, 1]], torch.long, device)
    st = SparseStorage(row=row, col=col)
    assert st.row().tolist() == [0, 0, 1, 1]
    assert st.col().tolist() == [0, 1, 0, 1]
    assert st.value() is None
    assert st.sparse_sizes() == (2, 2)

    # Unsorted COO input is sorted on construction; the values are
    # permuted together with their indices.
    row, col = tensor([[0, 0, 1, 1], [1, 0, 1, 0]], torch.long, device)
    val = tensor([2, 1, 4, 3], dtype, device)
    st = SparseStorage(row=row, col=col, value=val)
    assert st.row().tolist() == [0, 0, 1, 1]
    assert st.col().tolist() == [0, 1, 0, 1]
    assert st.value().tolist() == [1, 2, 3, 4]
    assert st.sparse_sizes() == (2, 2)
@pytest.mark.parametrize('dtype,device', product(dtypes, devices))
def test_caching(dtype, device):
    """Test lazy computation, caching and clearing of derived CSR/CSC data."""
    row, col = tensor([[0, 0, 1, 1], [0, 1, 0, 1]], torch.long, device)
    storage = SparseStorage(row=row, col=col)

    # Only the core COO data exists after construction; nothing derived
    # has been computed yet.
    assert storage._row.tolist() == row.tolist()
    assert storage._col.tolist() == col.tolist()
    assert storage._value is None
    assert storage._rowcount is None
    assert storage._rowptr is None
    assert storage._colcount is None
    assert storage._colptr is None
    assert storage._csr2csc is None
    # Fix: `_csc2csr` was asserted after fill_cache_() but never checked
    # here, so a constructor that eagerly computed it would go unnoticed.
    assert storage._csc2csr is None
    assert storage.num_cached_keys() == 0

    storage.fill_cache_()
    assert storage._rowcount.tolist() == [2, 2]
    assert storage._rowptr.tolist() == [0, 2, 4]
    assert storage._colcount.tolist() == [2, 2]
    assert storage._colptr.tolist() == [0, 2, 4]
    assert storage._csr2csc.tolist() == [0, 2, 1, 3]
    assert storage._csc2csr.tolist() == [0, 2, 1, 3]
    assert storage.num_cached_keys() == 5

    # Cached tensors passed to the constructor are preserved verbatim.
    storage = SparseStorage(row=row, rowptr=storage._rowptr, col=col,
                            value=storage._value,
                            sparse_sizes=storage._sparse_sizes,
                            rowcount=storage._rowcount, colptr=storage._colptr,
                            colcount=storage._colcount,
                            csr2csc=storage._csr2csc, csc2csr=storage._csc2csr)
    assert storage._rowcount.tolist() == [2, 2]
    assert storage._rowptr.tolist() == [0, 2, 4]
    assert storage._colcount.tolist() == [2, 2]
    assert storage._colptr.tolist() == [0, 2, 4]
    assert storage._csr2csc.tolist() == [0, 2, 1, 3]
    assert storage._csc2csr.tolist() == [0, 2, 1, 3]
    assert storage.num_cached_keys() == 5

    storage.clear_cache_()
    assert storage._rowcount is None
    # `rowptr` was handed to the constructor above, so it counts as core
    # data and must survive a cache clear.
    assert storage._rowptr is not None
    assert storage._colcount is None
    assert storage._colptr is None
    assert storage._csr2csc is None
    # Fix: keep the clear-check symmetric with the fill-check above.
    assert storage._csc2csr is None
    assert storage.num_cached_keys() == 0
@pytest.mark.parametrize('dtype,device', product(dtypes, devices))
def test_utility(dtype, device):
    """Value updates, resizing, and copy/clone semantics."""
    row, col = tensor([[0, 0, 1, 1], [1, 0, 1, 0]], torch.long, device)
    value = tensor([1, 2, 3, 4], dtype, device)
    storage = SparseStorage(row=row, col=col, value=value)
    assert storage.has_value()

    # In-place value updates, interpreting `value` in CSC resp. COO order.
    storage.set_value_(value, layout='csc')
    assert storage.value().tolist() == [1, 3, 2, 4]
    storage.set_value_(value, layout='coo')
    assert storage.value().tolist() == [1, 2, 3, 4]

    # Out-of-place variants return a new storage with the same semantics.
    storage = storage.set_value(value, layout='csc')
    assert storage.value().tolist() == [1, 3, 2, 4]
    storage = storage.set_value(value, layout='coo')
    assert storage.value().tolist() == [1, 2, 3, 4]

    storage = storage.sparse_resize((3, 3))
    assert storage.sparse_sizes() == (3, 3)

    # copy() shares the underlying tensors, clone() does not.
    copied = storage.copy()
    assert copied != storage
    assert copied.col().data_ptr() == storage.col().data_ptr()
    cloned = storage.clone()
    assert cloned != storage
    assert cloned.col().data_ptr() != storage.col().data_ptr()
@pytest.mark.parametrize('dtype,device', product(dtypes, devices))
def test_coalesce(dtype, device):
    """Duplicate entries are merged (values summed) by coalesce()."""
    row, col = tensor([[0, 0, 0, 1, 1], [0, 1, 1, 0, 1]], torch.long, device)
    value = tensor([1, 1, 1, 3, 4], dtype, device)
    storage = SparseStorage(row=row, col=col, value=value)

    # The duplicate (0, 1) entry is kept verbatim until coalescing.
    assert storage.row().tolist() == row.tolist()
    assert storage.col().tolist() == col.tolist()
    assert storage.value().tolist() == value.tolist()
    assert not storage.is_coalesced()

    storage = storage.coalesce()
    assert storage.is_coalesced()
    # Duplicates merged, their values summed: 1 + 1 -> 2 at (0, 1).
    assert storage.row().tolist() == [0, 0, 1, 1]
    assert storage.col().tolist() == [0, 1, 0, 1]
    assert storage.value().tolist() == [1, 2, 3, 4]
@pytest.mark.parametrize('dtype,device', product(dtypes, devices))
def test_sparse_reshape(dtype, device):
    """Reshaping sparse sizes remaps indices, with -1 inferring a dim."""
    row, col = tensor([[0, 1, 2, 3], [0, 1, 2, 3]], torch.long, device)
    storage = SparseStorage(row=row, col=col)

    # The 4x4 diagonal flattens to offsets 0, 5, 10, 15, which map onto
    # a 2x8 shape as rows [0, 0, 1, 1] and columns [0, 5, 2, 7].
    storage = storage.sparse_reshape(2, 8)
    assert storage.sparse_sizes() == (2, 8)
    assert storage.row().tolist() == [0, 0, 1, 1]
    assert storage.col().tolist() == [0, 5, 2, 7]

    # -1 infers the free dimension, restoring the original diagonal.
    storage = storage.sparse_reshape(-1, 4)
    assert storage.sparse_sizes() == (4, 4)
    assert storage.row().tolist() == [0, 1, 2, 3]
    assert storage.col().tolist() == [0, 1, 2, 3]

    storage = storage.sparse_reshape(2, -1)
    assert storage.sparse_sizes() == (2, 8)
    assert storage.row().tolist() == [0, 0, 1, 1]
    assert storage.col().tolist() == [0, 5, 2, 7]
| 37.294521
| 79
| 0.629017
| 758
| 5,445
| 4.408971
| 0.085752
| 0.229503
| 0.061041
| 0.062537
| 0.814782
| 0.782166
| 0.745362
| 0.730401
| 0.730401
| 0.684321
| 0
| 0.0473
| 0.20404
| 5,445
| 145
| 80
| 37.551724
| 0.723812
| 0
| 0
| 0.539823
| 0
| 0
| 0.013223
| 0
| 0
| 0
| 0
| 0
| 0.566372
| 1
| 0.044248
| false
| 0
| 0.044248
| 0
| 0.088496
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
1c8e32aa3f195dd0c76de765eb06574c07e98a2b
| 352
|
py
|
Python
|
g13gui/ui/__init__.py
|
jtgans/g13gui
|
aa07ee91b0fd89eb8d9991291e11ca3a97ca11cc
|
[
"MIT"
] | 3
|
2021-10-16T01:28:24.000Z
|
2021-12-07T21:49:54.000Z
|
g13gui/ui/__init__.py
|
jtgans/g13gui
|
aa07ee91b0fd89eb8d9991291e11ca3a97ca11cc
|
[
"MIT"
] | 12
|
2021-05-09T16:57:18.000Z
|
2021-06-16T19:20:57.000Z
|
g13gui/ui/__init__.py
|
jtgans/g13gui
|
aa07ee91b0fd89eb8d9991291e11ca3a97ca11cc
|
[
"MIT"
] | null | null | null |
"""Public re-exports for the g13gui UI widget package."""

from g13gui.ui.appindicator import AppIndicator
from g13gui.ui.g13button import G13Button
from g13gui.ui.g13buttonpopover import G13ButtonPopover
from g13gui.ui.mainwindow import MainWindow
from g13gui.ui.profilecombobox import ProfileComboBox
from g13gui.ui.profilepopover import ProfilePopover, ProfilePopoverMode

# Declare the public API explicitly so `from g13gui.ui import *` and
# static tooling see exactly the intended names.
__all__ = [
    'AppIndicator',
    'G13Button',
    'G13ButtonPopover',
    'MainWindow',
    'ProfileComboBox',
    'ProfilePopover',
    'ProfilePopoverMode',
]
| 44
| 55
| 0.880682
| 42
| 352
| 7.380952
| 0.261905
| 0.225806
| 0.270968
| 0.167742
| 0.206452
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067901
| 0.079545
| 352
| 7
| 56
| 50.285714
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
98da15b704ed3abd33843fbacfcb2e2940fe99d3
| 282,682
|
py
|
Python
|
oops/gui_functions.py
|
mtasa-typescript/mtasa-wiki-dump
|
edea1746850fb6c99d6155d1d7891e2cceb33a5c
|
[
"MIT"
] | null | null | null |
oops/gui_functions.py
|
mtasa-typescript/mtasa-wiki-dump
|
edea1746850fb6c99d6155d1d7891e2cceb33a5c
|
[
"MIT"
] | 1
|
2021-02-24T21:50:18.000Z
|
2021-02-24T21:50:18.000Z
|
oops/gui_functions.py
|
mtasa-typescript/mtasa-wiki-dump
|
edea1746850fb6c99d6155d1d7891e2cceb33a5c
|
[
"MIT"
] | null | null | null |
# Autogenerated file. ANY CHANGES WILL BE OVERWRITTEN
from to_python.core.types import FunctionType, \
FunctionArgument, \
FunctionArgumentValues, \
FunctionReturnTypes, \
FunctionSignature, \
FunctionDoc, \
FunctionOOP, \
FunctionOOPField, \
CompoundOOPData, \
FunctionData, \
CompoundFunctionData
DUMP_PARTIAL = [
CompoundOOPData(
server=[
],
client=[
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiBlur",
class_name='GUI widgets|GuiElement',
method=FunctionData(
signature=FunctionSignature(
name='blur',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='guiElement',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='' ,
arguments={
"guiElement": """the GUI element that you want to defocus """
},
result='returns true if the function was successful, false otherwise.' ,
),
url='guiBlur',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiBringToFront",
class_name='GUI widgets|GuiElement',
method=FunctionData(
signature=FunctionSignature(
name='bringToFront',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='guiElement',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function brings a GUI element on top of others.' ,
arguments={
"guiElement": """the GUI element that you want to move to the front. """
},
result='returns true if the function was successful, false otherwise.' ,
),
url='guiBringToFront',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiCheckBoxGetSelected",
class_name='Element/GUI/Checkbox|GuiCheckBox',
method=FunctionData(
signature=FunctionSignature(
name='getSelected',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theCheckbox',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function gets a checkboxs selection state.' ,
arguments={
"theCheckbox": """The checkbox you wish to retrieve the selection state of. """
},
result='returns true if the checkbox is selected, false if it is not.' ,
),
url='guiCheckBoxGetSelected',
),
field=FunctionOOPField(
name='selected',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiCheckBoxSetSelected",
class_name='Element/GUI/Checkbox|GuiCheckBox',
method=FunctionData(
signature=FunctionSignature(
name='setSelected',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theCheckbox',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='state',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function selects (ticks) or unselects a checkbox.' ,
arguments={
"theCheckbox": """The GUI element in which you wish to change the selection state of """,
"state": """The state of the checkbox, where true indicates selected, and false indicates unselected. """
},
result='returns true if the checkboxs selection state was successfully set, false otherwise.' ,
),
url='guiCheckBoxSetSelected',
),
field=FunctionOOPField(
name='selected',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiComboBoxAddItem",
class_name='Element/GUI/Combobox|GuiComboBox',
method=FunctionData(
signature=FunctionSignature(
name='addItem',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='comboBox',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='value',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='Adds an item to a combobox.' ,
arguments={
"comboBox": """The combobox you want to add a row to """,
"value": """The text that the item will contain. """
},
result='returns the item id if it has been created, false otherwise.' ,
),
url='guiComboBoxAddItem',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiComboBoxClear",
class_name='Element/GUI/Combobox|GuiComboBox',
method=FunctionData(
signature=FunctionSignature(
name='clear',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='comboBox',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function removes all the items from a combobox.' ,
arguments={
"comboBox": """The combobox element to be cleared """
},
result='returns true if the combobox element is valid and has been cleared successfully, false otherwise.' ,
),
url='guiComboBoxClear',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiComboBoxGetItemText",
class_name='Element/GUI/Combobox|GuiComboBox',
method=FunctionData(
signature=FunctionSignature(
name='getItemText',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['string'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='comboBox',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='itemId',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function retrieves the text from a specific combobox item.' ,
arguments={
"comboBox": """The combobox containing the item youre interested in """,
"itemId": """The index of the item """
},
result='returns the text of the item if the arguments are right, false otherwise.' ,
),
url='guiComboBoxGetItemText',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiComboBoxGetSelected",
class_name='Element/GUI/Combobox|GuiComboBox',
method=FunctionData(
signature=FunctionSignature(
name='getSelected',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='comboBox',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns the index of the selected combobox item.' ,
arguments={
"comboBox": """the combobox you want to know the selected item index of """
},
result='returns the index of the selected item if the specified combobox is valid and has a selected item, -1 if no item is selected, nil otherwise.' ,
),
url='guiComboBoxGetSelected',
),
field=FunctionOOPField(
name='selected',
types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiComboBoxIsOpen",
class_name='Element/GUI/Combobox|GuiComboBox',
method=FunctionData(
signature=FunctionSignature(
name='isOpen',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='comboBox',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='' ,
arguments={
"comboBox": """The combo box to get the state. """
},
result='returns true if combobox is opened, false if combobox is closed, nil otherwise.' ,
),
url='guiComboBoxIsOpen',
),
field=FunctionOOPField(
name='open',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiComboBoxRemoveItem",
class_name='Element/GUI/Combobox|GuiComboBox',
method=FunctionData(
signature=FunctionSignature(
name='removeItem',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='comboBox',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='itemId',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function removes an item from a combobox.' ,
arguments={
"comboBox": """The combobox containing the item youre interested in """,
"itemId": """The index of the item to remove """
},
result='returns true if the item was removes successfully, false otherwise.' ,
),
url='guiComboBoxRemoveItem',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiComboBoxSetItemText",
class_name='Element/GUI/Combobox|GuiComboBox',
method=FunctionData(
signature=FunctionSignature(
name='setItemText',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='comboBox',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='itemId',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='text',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function changes the text of a combobox item.' ,
arguments={
"comboBox": """The combobox containing the item youre interested in """,
"itemId": """The index of the item """,
"text": """The text you want to put in (does NOT accept numbers, use tostring() for that) """
},
result='returns true if the text was set successfully, false otherwise.' ,
),
url='guiComboBoxSetItemText',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiComboBoxSetOpen",
class_name='Element/GUI/Combobox|GuiComboBox',
method=FunctionData(
signature=FunctionSignature(
name='setOpen',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='comboBox',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='state',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='' ,
arguments={
"comboBox": """The combobox to be opened or closed. """,
"state": """The state of combobox. true, if the combobox is to be opened. false if the combobox is to be closed. """
},
result='returns true if is successful, false otherwise.' ,
),
url='guiComboBoxSetOpen',
),
field=FunctionOOPField(
name='open',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiComboBoxSetSelected",
class_name='Element/GUI/Combobox|GuiComboBox',
method=FunctionData(
signature=FunctionSignature(
name='setSelected',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='comboBox',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='itemIndex',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function sets the selected item from a combobox.' ,
arguments={
"comboBox": """the combobox you want to select an item from """,
"itemIndex": """the item you want to select (item 0 is the first item). If -1 is specified, then the combo box text is set to its caption. """
},
result='returns true if the selected item has been changed successfully, false otherwise.' ,
),
url='guiComboBoxSetSelected',
),
field=FunctionOOPField(
name='selected',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiCreateBrowser",
class_name='Element/gui-browser|GuiBrowser',
method=None,
field=None,
is_static=True,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiCreateButton",
class_name='Element/GUI/Button|GuiButton',
method=None,
field=None,
is_static=True,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiCreateCheckBox",
class_name='Element/GUI/Checkbox|GuiCheckBox',
method=None,
field=None,
is_static=True,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiCreateComboBox",
class_name='Element/GUI/Combobox|GuiComboBox',
method=None,
field=None,
is_static=True,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiCreateEdit",
class_name='Element/GUI/Edit_field|GuiEdit',
method=None,
field=None,
is_static=True,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiCreateFont",
class_name='Element/GUI font|GuiFont',
method=None,
field=None,
is_static=True,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiCreateGridList",
class_name='Element/GUI/Gridlist|GuiGridList',
method=None,
field=None,
is_static=True,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiCreateLabel",
class_name='Element/GUI/Text label|GuiLabel',
method=None,
field=None,
is_static=True,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiCreateMemo",
class_name='GuiMemo',
method=None,
field=None,
is_static=True,
)
],
),
CompoundOOPData(
server=[
],
client=[
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiCreateRadioButton",
class_name='Element/GUI/Radio button|GuiRadioButton',
method=None,
field=None,
is_static=True,
)
],
),
CompoundOOPData(
server=[
],
client=[
],
),
CompoundOOPData(
server=[
],
client=[
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiCreateStaticImage",
class_name='Element/GUI/Static_image|GuiStaticImage',
method=None,
field=None,
is_static=True,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiCreateTab",
class_name='Element/GUI/Tab|GuiTab',
method=None,
field=None,
is_static=True,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiCreateTabPanel",
class_name='Element/GUI/Tab panel|GuiTabPanel',
method=None,
field=None,
is_static=True,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiCreateWindow",
class_name='Element/GUI/Window|GuiWindow',
method=None,
field=None,
is_static=True,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiDeleteTab",
class_name='Element/GUI/Tab|GuiTab',
method=FunctionData(
signature=FunctionSignature(
name='delete',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='tabToDelete',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='tabPanel',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function deletes a tab from a tab panel.' ,
arguments={
"tabToDelete": """This is an element representing the tab that you want to delete. """,
"tabPanel": """This is the guiCreateTabPanel|tab panel parent that the tab is attached to. """
},
result='returns true the tab was successfully deleted, false otherwise.' ,
),
url='guiDeleteTab',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiEditGetCaretIndex",
class_name='Element/GUI/Edit_field|GuiEdit',
method=FunctionData(
signature=FunctionSignature(
name='getCaretIndex',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theElement',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns the caret (the text cursor) position within the editbox.' ,
arguments={
"theElement": """The edit box you want to get the caret position from """
},
result='returns the caret index on success, false otherwise.' ,
),
url='guiEditGetCaretIndex',
),
field=FunctionOOPField(
name='caretIndex',
types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiEditGetMaxLength",
class_name='Element/GUI/Edit_field|GuiEdit',
method=FunctionData(
signature=FunctionSignature(
name='getMaxLength',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='guiEdit',
argument_type=FunctionType(
names=['gui-edit'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='' ,
arguments={
"guiEdit": """The edit box you want to get the maximum text length of. """
},
result='returns the maximum text length on success, false otherwise.' ,
),
url='guiEditGetMaxLength',
),
field=FunctionOOPField(
name='maxLength',
types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiEditIsMasked",
class_name='None',
method=None,
field=FunctionOOPField(
name='masked',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=None,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiEditIsReadOnly",
class_name='None',
method=None,
field=FunctionOOPField(
name='readOnly',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=None,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiEditSetCaretIndex",
class_name='Element/GUI/Edit_field|GuiEdit',
method=FunctionData(
signature=FunctionSignature(
name='setCaretIndex',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theElement',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='index',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function sets the current position of the caret (the text cursor) within the edit box.' ,
arguments={
"theElement": """The edit box to be changed. """,
"index": """An integer referring to the desired position within the box. """
},
result='returns true if the index was successfully set, false otherwise.' ,
),
url='guiEditSetCaretIndex',
),
field=FunctionOOPField(
name='caretIndex',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiEditSetMasked",
class_name='Element/GUI/Edit_field|GuiEdit',
method=FunctionData(
signature=FunctionSignature(
name='setMasked',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theElement',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='status',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function sets or removes masking (covering up the text being typed) for password text fields.' ,
arguments={
"theElement": """The edit box to be changed. """,
"status": """A boolean value indicating whether masking is to be enabled or disabled. """
},
result='returns true if the function is successful, false otherwise.' ,
),
url='guiEditSetMasked',
),
field=FunctionOOPField(
name='masked',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiEditSetMaxLength",
class_name='Element/GUI/Edit_field|GuiEdit',
method=FunctionData(
signature=FunctionSignature(
name='setMaxLength',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='guiEdit',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='length',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function sets the maximum text length that can be typed into an edit box.' ,
arguments={
"theElement": """The edit box to be changed. """,
"length": """An integer indicating the maximum number of characters that can be typed into the box. """
},
result='returns true if the max length was set successfully, false otherwise.' ,
),
url='guiEditSetMaxLength',
),
field=FunctionOOPField(
name='maxLength',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiEditSetReadOnly",
class_name='Element/GUI/Edit_field|GuiEdit',
method=FunctionData(
signature=FunctionSignature(
name='setReadOnly',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='editField',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='status',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function allows you to set or remove read-only status for an edit box. If read-only is set to true, the box is not editable.' ,
arguments={
"editField": """The element of the Element/GUI/Edit field|edit field to be modified. """,
"status": """A boolean value indicating whether read-only is to be enabled or disabled. """
},
result='returns true if edit fields read-only status was changed successfully, false otherwise.' ,
),
url='guiEditSetReadOnly',
),
field=FunctionOOPField(
name='readOnly',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiFocus",
class_name='GUI widgets|GuiElement',
method=FunctionData(
signature=FunctionSignature(
name='focus',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='guiElement',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='' ,
arguments={
"guiElement": """the GUI element that you want to focus """
},
result='returns true if the function was successful, false otherwise.' ,
),
url='guiFocus',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGetAlpha",
class_name='GUI widgets|GuiElement',
method=FunctionData(
signature=FunctionSignature(
name='getAlpha',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['float'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='guiElement',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='Alpha represents the transparency of a gui element. This function allows retrieval of a gui elements current alpha.' ,
arguments={
"guiElement": """The gui element in which you want to retrieve the alpha of. """
},
result='this function returns a positive integer in between 0 and 1 of the gui elements current alpha, or false if it could not be retrieved.' ,
),
url='guiGetAlpha',
),
field=FunctionOOPField(
name='alpha',
types=[
FunctionType(
names=['float'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGetBrowser",
class_name='Element/gui-browser|guiBrowser',
method=FunctionData(
signature=FunctionSignature(
name='getBrowser',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['browser'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theBrowser',
argument_type=FunctionType(
names=['gui-browser'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function gets the browser element behind a gui-browser (a browser that has been created via guiCreateBrowser).' ,
arguments={
"theBrowser": """The gui-browser """
},
result='returns the element/browser|browser element if a correct element/gui-browser|gui-browser has been passed, false otherwise.' ,
),
url='guiGetBrowser',
),
field=FunctionOOPField(
name='browser',
types=[
FunctionType(
names=['browser'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGetCursorType",
class_name='GUI widgets|GuiElement',
method=FunctionData(
signature=FunctionSignature(
name='getCursorType',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['string'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function is used to get the type of the current cursor image.' ,
arguments={
},
result='returns a string containing the cursor type:\n* none // cursor has no image\n* arrow // default cursor\n* sizing_ns // n-s (up-down) sizing cursor\n* sizing_ew // e-w (left-right) sizing cursor\n* sizing_nwse // nw-se diagonal sizing cursor\n* sizing_nesw // ne-sw diagonal sizing cursor\n* sizing_eswe // es-we horizontal sizing cursor\n* move // move cursor\n* container_drag // drag container cursor (note: not in use)\n* segment_moving // segment moving cursor (note: not in use)\n* segment_sizing // segment sizing cursor (note: not in use)' ,
),
url='guiGetCursorType',
),
field=FunctionOOPField(
name='cursorType',
types=[
FunctionType(
names=['string'],
is_optional=False,
)
],
),
is_static=True,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGetEnabled",
class_name='GUI widgets|GuiElement',
method=FunctionData(
signature=FunctionSignature(
name='getEnabled',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='guiElement',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function determines if a GUI element is enabled.' ,
arguments={
"guiElement": """the GUI element to be checked. """
},
result='returns true if the element is enabled, false otherwise.' ,
),
url='guiGetEnabled',
),
field=FunctionOOPField(
name='enabled',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGetFont",
class_name='GUI widgets|GuiElement',
method=FunctionData(
signature=FunctionSignature(
name='getFont',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['string'],
is_optional=False,
),
FunctionType(
names=['element'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='guiElement',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function is used to get the current font that is used to draw text in GUI elements.' ,
arguments={
"guiElement": """element you wish to get the font of. """
},
result='*string a string containing the name of the elements current font, or false if the gui element passed to the function is invalid.\n*element the custom gui font that is used, or nil otherwise' ,
),
url='guiGetFont',
),
field=FunctionOOPField(
name='font',
types=[
FunctionType(
names=['string'],
is_optional=False,
),
FunctionType(
names=['element'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGetInputEnabled",
class_name='GUI widgets|GuiElement',
method=FunctionData(
signature=FunctionSignature(
name='isInputEnabled',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function checks whether user input is focused on the GUI or the game.' ,
arguments={
},
result='returns true if input is focused on gui, false if its focused on the game.' ,
),
url='guiGetInputEnabled',
),
field=FunctionOOPField(
name='inputEnabled',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=True,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGetInputMode",
class_name='GUI widgets|GuiElement',
method=FunctionData(
signature=FunctionSignature(
name='getInputMode',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['string'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns the current input mode as set by guiSetInputMode.\nDefault mode is allow_binds.' ,
arguments={
},
result='returns a string defining the current input mode, potential values are:\n* allow_binds: binds are enabled, hence using a key such as t in an editbox will still activate the chatbox\n* no_binds: binds are disabled, hence using a key such as t in an editbox will not activate the chatbox\n* no_binds_when_editing: binds are always enabled except when an editable editbox or memo has input focus' ,
),
url='guiGetInputMode',
),
field=FunctionOOPField(
name='inputMode',
types=[
FunctionType(
names=['string'],
is_optional=False,
)
],
),
is_static=True,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGetPosition",
class_name='GUI widgets|GuiElement',
method=FunctionData(
signature=FunctionSignature(
name='getPosition',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['float'],
is_optional=False,
),
FunctionType(
names=['float'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='guiElement',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='relative',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function allows retrieval of a GUI elements current position, relative to its parent.' ,
arguments={
"guiElement": """The gui element of which you wish to retrieve the position. """,
"relative": """A boolean representing whether the position should be relative to the elements parent width, or the number of offset pixels from the parents origin. """
},
result='returns floats representing the x and y position of the element, or false if the position could not be retrieved.' ,
),
url='guiGetPosition',
),
field=FunctionOOPField(
name='position',
types=[
FunctionType(
names=['float'],
is_optional=False,
),
FunctionType(
names=['float'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGetProperties",
class_name='GUI widgets|GuiElement',
method=FunctionData(
signature=FunctionSignature(
name='getProperties',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['table'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='guiElement',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function gets a list of the CEGUI property names and values of a GUI element. To find out what the different properties mean, check out the http://static.cegui.org.uk/static/WindowsLookProperties.html CEGUI properties page.' ,
arguments={
"guiElement": """the GUI element you wish to get the properties of. """
},
result='if the function succeeds, the return value is a table. its keys are property names, the corresponding values are the values of the properties (both names and values are always strings). if the function fails, it returns false.' ,
),
url='guiGetProperties',
),
field=FunctionOOPField(
name='properties',
types=[
FunctionType(
names=['table'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGetProperty",
class_name='GUI widgets|GuiElement',
method=FunctionData(
signature=FunctionSignature(
name='getProperty',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['string'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='guiElement',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='property',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function gets the value of a specific CEGUI property of a GUI element. For a list of properties and their meaning, see the http://static.cegui.org.uk/static/WindowsLookProperties.html CEGUI properties page.' ,
arguments={
"guiElement": """the GUI element you wish to get a property of. """,
"property": """the name of of property you want the value of. """
},
result='if the function succeeds, it returns a string with the value of the property. if it fails, it returns false.' ,
),
url='guiGetProperty',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGetScreenSize",
class_name='GUI widgets|GuiElement',
method=FunctionData(
signature=FunctionSignature(
name='getScreenSize',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['float'],
is_optional=False,
),
FunctionType(
names=['float'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function retrieves the local screen size according to the resolution they are using.' ,
arguments={
},
result='this returns two floats representing the players screen resolution, width and height.' ,
),
url='guiGetScreenSize',
),
field=None,
is_static=True,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGetSelectedTab",
class_name='Element/GUI/Tab panel|GuiTabPanel',
method=FunctionData(
signature=FunctionSignature(
name='getSelectedTab',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['element'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='tabPanel',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns the currently selected tab in the specified Element/GUI/Tab panel|tab panel.' ,
arguments={
"tabPanel": """The Element/GUI/Tab panel|tab panel which current tab you want to retrieve. """
},
result='returns an element of the element/gui/tab|tab if a tab was selected or nil if no tab was selected. if passed arguments were invalid or something went wrong, the function will return false.' ,
),
url='guiGetSelectedTab',
),
field=FunctionOOPField(
name='selectedTab',
types=[
FunctionType(
names=['element'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGetSize",
class_name='GUI widgets|GuiElement',
method=FunctionData(
signature=FunctionSignature(
name='getSize',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['float'],
is_optional=False,
),
FunctionType(
names=['float'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theElement',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='relative',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function gets the size of a GUI element.' ,
arguments={
"theElement": """The GUI element to get size of. """,
"relative": """A boolean representing whether the size should be relative to the elements parent width, or an absolute size in pixels. """
},
result='returns the gui element size x and y if the function has been successful, false otherwise.' ,
),
url='guiGetSize',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGetText",
class_name='GUI widgets|GuiElement',
method=FunctionData(
signature=FunctionSignature(
name='getText',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['string'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='guiElement',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function is used to get the text of GUI elements like edit boxes, labels, buttons etc.' ,
arguments={
"guiElement": """element you wish to get text of. """
},
result='returns a string containing the requested elements text, or false if the gui element passed to the function is invalid.' ,
),
url='guiGetText',
),
field=FunctionOOPField(
name='text',
types=[
FunctionType(
names=['string'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGetVisible",
class_name='GUI widgets|GuiElement',
method=FunctionData(
signature=FunctionSignature(
name='getVisible',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='guiElement',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function determines if a GUI element is visible.' ,
arguments={
"guiElement": """the GUI element to be checked """
},
result='returns true if the element is visible, false otherwise.' ,
),
url='guiGetVisible',
),
field=FunctionOOPField(
name='visible',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGridListAddColumn",
class_name='Element/GUI/Gridlist|GuiGridList',
method=FunctionData(
signature=FunctionSignature(
name='addColumn',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='gridList',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='title',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='width',
argument_type=FunctionType(
names=['float'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function is used to create columns in grid lists.' ,
arguments={
"gridList": """The grid list you want to add a column to """,
"title": """Title of the column """,
"width": """Column width, relative to the grid list width """
},
result='returns the column id if it was created, false otherwise.' ,
),
url='guiGridListAddColumn',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
# OOP metadata for guiGridListAddRow (client-only): exposed as GuiGridList:addRow().
client=[
FunctionOOP(
description=None,
base_function_name="guiGridListAddRow",
class_name='Element/GUI/Gridlist|GuiGridList',
method=FunctionData(
signature=FunctionSignature(
name='addRow',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='gridList',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='itemText1',
argument_type=FunctionType(
names=['int', 'string'],
is_optional=True,
),
default_value=None,
)
],
[
FunctionArgument(
name='itemText2',
argument_type=FunctionType(
names=['int', 'string'],
is_optional=True,
),
default_value=None,
)
]
],
# addRow accepts further itemText columns beyond the two modelled above.
variable_length=True,
),
generic_types=[
],
),
docs=FunctionDoc(
# Fix: dropped the stray ' }}' MediaWiki template residue that trailed the
# scraped description ("...guiGridListSetItemText. }}" in the raw dump).
description='Adds a row to a grid list, and optionally add simple text items with your rows. Use guiGridListSetItemText to add row headers.\nATTENTION: Without guiGridListSetItemText there is no row added to the grid.\nLook at the example, first you give the row a name with row = guiGridListAddRow ( playerList ), and then you use guiGridListSetItemText.' ,
arguments={
"gridList": """The grid list you want to add a row to """,
"itemText1": """The text for the first column item in the row. Either a string or a number can be passed (use numbers for sorting purposes). """,
"itemText2": """The text for the second column item in the row. Either a string or a number can be passed (use numbers for sorting purposes). """,
"...": """Item text for any other columns """
},
result='returns the row id if it has been created, false otherwise.' ,
),
url='guiGridListAddRow',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGridListAutoSizeColumn",
class_name='Element/GUI/Gridlist|GuiGridList',
method=FunctionData(
signature=FunctionSignature(
name='autoSizeColumn',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='gridList',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='columnIndex',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This allows you to automatically size a column to display everything in it correctly, with the most minimal width.' ,
arguments={
"gridList": """The Element/GUI/Gridlist|grid list element where the column is located. """,
"columnIndex": """The ID of the column you want to be auto-sized. """
},
result='returns true if the column was auto-sized, false otherwise.' ,
),
url='guiGridListAutoSizeColumn',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGridListClear",
class_name='Element/GUI/Gridlist|GuiGridList',
method=FunctionData(
signature=FunctionSignature(
name='clear',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='gridList',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function clears all the data from a grid list.' ,
arguments={
"gridList": """The grid list element to be cleared """
},
result='returns true if the grid list element is valid and has been cleared successfully, false otherwise.' ,
),
url='guiGridListClear',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGridListGetColumnCount",
class_name='Element/GUI/Gridlist|GuiGridList',
method=FunctionData(
signature=FunctionSignature(
name='getColumnCount',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='gridList',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This allows you to get the count of existing columns in a gridlist.' ,
arguments={
"gridList": """The grid list you want to add a column to """
},
result='returns an integer with the amount of columns in the gridlist, false otherwise.' ,
),
url='guiGridListGetColumnCount',
),
field=FunctionOOPField(
name='columnCount',
types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGridListGetColumnTitle",
class_name='Element/GUI/Gridlist|GuiGridList',
method=FunctionData(
signature=FunctionSignature(
name='getColumnTitle',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['string'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='guiGridlist',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='columnIndex',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function is used to get the column title of a gridlist column.' ,
arguments={
"guiGridlist": """: The grid list you want to get the column title from """,
"columnIndex": """: Column ID """
},
result='returns a string containing the column title, or false otherwise.' ,
),
url='guiGridListGetColumnTitle',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGridListGetColumnWidth",
class_name='Element/GUI/Gridlist|GuiGridList',
method=FunctionData(
signature=FunctionSignature(
name='getColumnWidth',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['float'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='gridList',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='columnIndex',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='relative',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This allows you to get the width of an existing column in a gridlist.' ,
arguments={
"gridList": """The grid list you want to add a column to """,
"columnIndex": """Column ID of the Get size """,
"relative": """A boolean defining whether width measurements will be relative to the Gridlist size, or absolute pixels. """
},
result='returns the width of the gridlist column, false if bad arguments were given.' ,
),
url='guiGridListGetColumnWidth',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGridListGetHorizontalScrollPosition",
class_name='Element/GUI/Gridlist|GuiGridList',
method=FunctionData(
signature=FunctionSignature(
name='getHorizontalScrollPosition',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['float'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='guiGridlist',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function is used to get the horizontal scroll position from a grid list' ,
arguments={
"guiGridlist": """: The grid list you want to get the horizontal scroll position from """
},
result='returns a integer between 0 and 100 indicating the horizontal scroll position, or false otherwise.' ,
),
url='guiGridListGetHorizontalScrollPosition',
),
field=FunctionOOPField(
name='horizontalScrollPosition',
types=[
FunctionType(
names=['float'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGridListGetItemColor",
class_name='Element/GUI/Gridlist|GuiGridList',
method=FunctionData(
signature=FunctionSignature(
name='getItemColor',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
),
FunctionType(
names=['int'],
is_optional=False,
),
FunctionType(
names=['int'],
is_optional=False,
),
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='gridList',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='rowIndex',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='columnIndex',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function gets the color of a gridlist item.' ,
arguments={
"gridList": """The grid list element """,
"rowIndex": """Row ID """,
"columnIndex": """Column ID """
},
result='returns four int values, representing the amount of red, green, blue and alpha if successful. false otherwise.' ,
),
url='guiGridListGetItemColor',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGridListGetItemData",
class_name='Element/GUI/Gridlist|GuiGridList',
method=FunctionData(
signature=FunctionSignature(
name='getItemData',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['var'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='gridList',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='rowIndex',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='columnIndex',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='With this function you can retrieve the string data associated with an item in a Element/GUI/Gridlist|grid list. This is not the text that is displayed on the item, but an internal string that you can use to hold extra information about the item.<br/>\nNote: This function will only work after you set the items text using guiGridListSetItemText!' ,
arguments={
"gridList": """the grid list containing the item youre interested in """,
"rowIndex": """the row index of the item """,
"columnIndex": """the column index of the item """
},
result='returns the item data of the specified item if succesful, false if one of the arguments was invalid.' ,
),
url='guiGridListGetItemData',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGridListGetItemText",
class_name='Element/GUI/Gridlist|GuiGridList',
method=FunctionData(
signature=FunctionSignature(
name='getItemText',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['string'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='gridList',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='rowIndex',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='columnIndex',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function retrieves the text from a specific grid list item.' ,
arguments={
"gridList": """the gridlist containing the item youre interested in """,
"rowIndex": """row id of the item (first is 0) """,
"columnIndex": """column id of the item (first is 0) """
},
result='returns the text of the item if the arguments are right, false otherwise.' ,
),
url='guiGridListGetItemText',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGridListGetRowCount",
class_name='Element/GUI/Gridlist|GuiGridList',
method=FunctionData(
signature=FunctionSignature(
name='getRowCount',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='gridList',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns the number of rows in a grid list.' ,
arguments={
"gridList": """The grid list to get the number of rows from. """
},
result='returns the number of rows if the function is successful, false otherwise.' ,
),
url='guiGridListGetRowCount',
),
field=FunctionOOPField(
name='rowCount',
types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGridListGetSelectedCount",
class_name='Element/GUI/Gridlist|GuiGridList',
method=FunctionData(
signature=FunctionSignature(
name='getSelectedCount',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='gridList',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns the amount of options selected in the specified Element/GUI/Gridlist|grid list.' ,
arguments={
"gridList": """The Element/GUI/Gridlist|grid list which amount of selected items you want to retrieve. """
},
result='returns an integer representing the amount of selected options if everything was successful or false if invalid arguments were passed.' ,
),
url='guiGridListGetSelectedCount',
),
field=FunctionOOPField(
name='selectedCount',
types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGridListGetSelectedItem",
class_name='Element/GUI/Gridlist|GuiGridList',
method=FunctionData(
signature=FunctionSignature(
name='getSelectedItem',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
),
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='gridList',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns the row and column indexes of the selected item in a grid list. First selected row and column is (0, 0).' ,
arguments={
"gridList": """the grid list you want to know the selected row index of """
},
result='returns the row and column indexes of the selected item if the specified grid list is valid and has a selected item, (-1, -1) if no item is selected, false otherwise.' ,
),
url='guiGridListGetSelectedItem',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGridListGetSelectedItems",
class_name='Element/GUI/Gridlist|GuiGridList',
method=FunctionData(
signature=FunctionSignature(
name='getSelectedItems',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['table'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='gridList',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns the items selected in the specified Element/GUI/Gridlist|grid list.\nNote that for some reason the column ID is 1 lower than it should be, for example 0 is returned but if you try and get the text for column 0 there is nothing, but column 1 has what you clicked on.' ,
arguments={
"gridList": """The Element/GUI/Gridlist|grid list which selected items you want to retrieve. """
},
result='returns a table over the selected items in the element/gui/gridlist|grid list in this format:\n<syntaxhighlight lang=lua>\ntable = {\n1 = {\ncolumn, -- has the first selected items column id\nrow -- has the first selected items row id\n},\n2 = {\ncolumn,-- has the second selected items column id\nrow -- has the second selected items row id\n},\n...\n}\n</syntaxhighlight>\nif everything was successful or false if invalid arguments were passed.' ,
),
url='guiGridListGetSelectedItems',
),
field=FunctionOOPField(
name='selectedItems',
types=[
FunctionType(
names=['table'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
# OOP metadata for guiGridListGetSelectionMode (client-only): exposed as the
# GuiGridList.selectionMode property / :getSelectionMode() method.
client=[
FunctionOOP(
description=None,
base_function_name="guiGridListGetSelectionMode",
class_name='Element/GUI/Gridlist|GuiGridList',
method=FunctionData(
signature=FunctionSignature(
name='getSelectionMode',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='gridlist',
# NOTE(review): 'gui-Element' does not match the plain 'element' type
# name used by every sibling gridlist entry in this dump — presumably
# unnormalized wiki markup leaked through the scraper; confirm against
# the type-name canonicalization step before relying on it.
argument_type=FunctionType(
names=['gui-Element'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
# Empty description: the source wiki page apparently provides none here.
description='' ,
arguments={
"gridlist": """The gridlist you want to get the selection mode of. """
},
result='returns the id of the current gridlists selection mode.' ,
),
url='guiGridListGetSelectionMode',
),
field=FunctionOOPField(
name='selectionMode',
types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGridListGetVerticalScrollPosition",
class_name='Element/GUI/Gridlist|GuiGridList',
method=FunctionData(
signature=FunctionSignature(
name='getVerticalScrollPosition',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['float'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='guiGridlist',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function is used to get the vertical scroll position from a grid list' ,
arguments={
"guiGridlist": """: The grid list you want to get the vertical scroll position from """
},
result='returns a integer between 0 and 100 indicating the vertical scroll position, or false otherwise.' ,
),
url='guiGridListGetVerticalScrollPosition',
),
field=FunctionOOPField(
name='verticalScrollPosition',
types=[
FunctionType(
names=['float'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
# OOP metadata for guiGridListInsertRowAfter (client-only): exposed as
# GuiGridList:insertRowAfter().
# NOTE(review): internally inconsistent scrape — docs.arguments below lists
# itemText1/itemText2/'...' and the result text says 'returns true', yet the
# signature declares only (gridList, rowIndex) with variable_length=False and
# an int return type. The sibling guiGridListAddRow entry models the varargs
# explicitly; confirm against the current wiki page which form is correct.
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGridListInsertRowAfter",
class_name='Element/GUI/Gridlist|GuiGridList',
method=FunctionData(
signature=FunctionSignature(
name='insertRowAfter',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='gridList',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='rowIndex',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This allows you to insert a new row after a specified row, and simultaneously set text. Good for inserting new rows in the middle of existing rows. To insert at the top use -1 as row index.' ,
arguments={
"gridList": """The grid list you want to add a row to """,
"rowIndex": """Row ID of the row you want to insert the new row after. """,
"itemText1": """The text for the first column item in the row. Either a string or a number can be passed (use numbers for sorting purposes). """,
"itemText2": """The text for the second column item in the row. Either a string or a number can be passed (use numbers for sorting purposes). """,
"...": """Item text for any other columns """
},
result='returns true if the row was successfully added, false otherwise.' ,
),
url='guiGridListInsertRowAfter',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGridListIsSortingEnabled",
class_name='Element/GUI/Gridlist|GuiGridList',
method=FunctionData(
signature=FunctionSignature(
name='isSortingEnabled',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='guiGridlist',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='' ,
arguments={
"guiGridlist": """The GUI gridlist you wish to check if sorting is enabled or not. """
},
result='returns true if sorting is enabled, false otherwise.' ,
),
url='guiGridListIsSortingEnabled',
),
field=FunctionOOPField(
name='sortingEnabled',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGridListRemoveColumn",
class_name='Element/GUI/Gridlist|GuiGridList',
method=FunctionData(
signature=FunctionSignature(
name='removeColumn',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='gridList',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='columnIndex',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This allows you to delete columns that exist in grid lists.' ,
arguments={
"gridList": """The grid list you want to remove a column from """,
"columnIndex": """Column ID """
},
result='returns true if the grid list column was successfully removed, false otherwise.' ,
),
url='guiGridListRemoveColumn',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGridListRemoveRow",
class_name='Element/GUI/Gridlist|GuiGridList',
method=FunctionData(
signature=FunctionSignature(
name='removeRow',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='gridList',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='rowIndex',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This allows you to delete rows that exist in grid lists.' ,
arguments={
"gridList": """The grid list you want to remove a row from """,
"rowIndex": """The row ID which you want to remove """
},
result='returns true if the grid list row was successfully removed, false otherwise.' ,
),
url='guiGridListRemoveRow',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGridListSetColumnTitle",
class_name='Element/GUI/Gridlist|GuiGridList',
method=FunctionData(
signature=FunctionSignature(
name='setColumnTitle',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='guiGridlist',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='columnIndex',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='title',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function is used to change the column title of a gridlist column.' ,
arguments={
"guiGridlist": """: The grid list you want to change the column title from """,
"columnIndex": """: Column ID """,
"title": """: The title of the column """
},
result='returns true if the new title was set, or false otherwise.' ,
),
url='guiGridListSetColumnTitle',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGridListSetColumnWidth",
class_name='Element/GUI/Gridlist|GuiGridList',
method=FunctionData(
signature=FunctionSignature(
name='setColumnWidth',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='gridList',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='columnIndex',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='width',
argument_type=FunctionType(
names=['number'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='relative',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This allows you to set the width of an existing column in a gridlist.' ,
arguments={
"gridList": """The grid list you want to add a column to """,
"columnIndex": """Column ID of the size you want to change """,
"width": """A float or integer of the width of the column depending on the relative argument. """,
"relative": """A boolean defining whether width measurements will be relative to the Gridlist size, or absolute pixels. """
},
result='returns true if the gridlist column width was successfully set, false if bad arguments were given.' ,
),
url='guiGridListSetColumnWidth',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGridListSetHorizontalScrollPosition",
class_name='Element/GUI/Gridlist|GuiGridList',
method=FunctionData(
signature=FunctionSignature(
name='setHorizontalScrollPosition',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='guiGridlist',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='fPosition',
argument_type=FunctionType(
names=['float'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function is used to set the horizontal scroll position from a grid list' ,
arguments={
"guiGridlist": """: The grid list you want to set the horizontal scroll position from """,
"fPosition": """: A float representing the horizontal scroll position (0-100) """
},
result='returns true if the horizontal scroll position was set, or false otherwise.' ,
),
url='guiGridListSetHorizontalScrollPosition',
),
field=FunctionOOPField(
name='horizontalScrollPosition',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGridListSetItemColor",
class_name='Element/GUI/Gridlist|GuiGridList',
method=FunctionData(
signature=FunctionSignature(
name='setItemColor',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='gridList',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='rowIndex',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='columnIndex',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='red',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='green',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='blue',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='alpha',
argument_type=FunctionType(
names=['int'],
is_optional=True,
),
default_value='255',
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function changes the color of a gridlist item.' ,
arguments={
"gridList": """The grid list element """,
"rowIndex": """Row ID """,
"columnIndex": """Column ID """,
"red": """The amount of red in the color (0-255) """,
"green": """The amount of green in the color (0-255) """,
"blue": """The amount of blue in the color (0-255) """,
"alpha": """The amount of alpha in the color (0-255). """
},
result='returns true if the item color was set successfully, false otherwise.' ,
),
url='guiGridListSetItemColor',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGridListSetItemData",
class_name='Element/GUI/Gridlist|GuiGridList',
method=FunctionData(
signature=FunctionSignature(
name='setItemData',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='gridList',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='rowIndex',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='columnIndex',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='data',
argument_type=FunctionType(
names=['var'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function sets a Item Data associated to a grid list item.<br/>\nNote: This function will only work after you set the items text using guiGridListSetItemText!' ,
arguments={
"gridList": """A gridlist element of the data you wish to set to """,
"rowIndex": """The row of the item you wish to set to """,
"columnIndex": """The column of the item you wish to set to """,
"data": """The data you wish to set to the item. """
},
result='returns true if the data was set successfully, false otherwise' ,
),
url='guiGridListSetItemData',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGridListSetItemText",
class_name='Element/GUI/Gridlist|GuiGridList',
method=FunctionData(
signature=FunctionSignature(
name='setItemText',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='gridList',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='rowIndex',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='columnIndex',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='text',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='section',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='number',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function changes the text of a gridlist item.\nNotice: This function doesnt work well with Sorting. If you are using sorting, please use the optional arguments of guiGridListAddRow as much as possible.' ,
arguments={
"gridList": """The grid list element """,
"rowIndex": """Row ID """,
"columnIndex": """Column ID """,
"text": """The text you want to put in (does NOT accept numbers, use tostring() for that) """,
"section": """Determines if the item is a section """,
"number": """Tells whether the text item is a number value or not (used for sorting) """
},
result='returns true if the item text was set successfully, false otherwise.' ,
),
url='guiGridListSetItemText',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGridListSetScrollBars",
class_name='Element/GUI/Gridlist|GuiGridList',
method=FunctionData(
signature=FunctionSignature(
name='setScrollBars',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='guiGridlist',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='horizontalBar',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='verticalBar',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function allows a gridlists scrollbar to be forced on, or returned to default.' ,
arguments={
"guiGridlist": """The GUI gridlist you wish to change the state of scrollbars """,
"horizontalBar": """A bool where true forces the horizontal scrollbar on, and false returns them to default. """,
"verticalBar": """A bool where true forces the verical scrollbar on, and false returns them to default. """
},
result='returns true if the scrollbars were successfully set, false otherwise.' ,
),
url='guiGridListSetScrollBars',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGridListSetSelectedItem",
class_name='Element/GUI/Gridlist|GuiGridList',
method=FunctionData(
signature=FunctionSignature(
name='setSelectedItem',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='gridList',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='rowIndex',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='columnIndex',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='bReset',
argument_type=FunctionType(
names=['bool'],
is_optional=True,
),
default_value='true',
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function selects an item from a gridlist. If you wish to deselect whatever item is selected, pass 0 as both the rowIndex and columnIndex arguments.' ,
arguments={
"gridList": """the grid list you want to select an item from """,
"rowIndex": """the row you want to select (index 0 is the first row) """,
"columnIndex": """the column you want to select (index 1 is the first column) """,
"bReset": """set to false for multiple selections """
},
result='returns true if the passed arguments are correct and the item has been selected, false otherwise.' ,
),
url='guiGridListSetSelectedItem',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGridListSetSelectionMode",
class_name='Element/GUI/Gridlist|GuiGridList',
method=FunctionData(
signature=FunctionSignature(
name='setSelectionMode',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='gridlist',
argument_type=FunctionType(
names=['gui-Element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='mode',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function sets the selection mode of a gui gridlist. For example, the MTA server browser selects a whole row, while the Controls dialog selects a single cell. To select multiple items you must be holding down ctrl.' ,
arguments={
"gridlist": """The gridlist in which you wish to set the selection mode. """,
"mode": """The mode of the selection. Can be the following values: """,
"0": """Single row selection """,
"1": """Multiple row selection """,
"2": """Single cell selection """,
"3": """Multiple cell selection """,
"4": """Nominated(First) single column selection """,
"5": """Nominated(First) multiple column selection """,
"6": """Single column selection """,
"7": """Multiple column selection """,
"8": """Nominated(First) single row selection """,
"9": """Nominated(First) multiple row selection """
},
result='returns true if the selection mode was successfully set, false otherwise.' ,
),
url='guiGridListSetSelectionMode',
),
field=FunctionOOPField(
name='selectionMode',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGridListSetSortingEnabled",
class_name='Element/GUI/Gridlist|GuiGridList',
method=FunctionData(
signature=FunctionSignature(
name='setSortingEnabled',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='guiGridlist',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='enabled',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function allows the disabling or enabling of sorting within a gridlist. Sorting is achieved by clicking a column header. Gridlist items will be sorted according to the clicked column. By default, gridlists have sorting enabled. This function will allow you to toggle this.' ,
arguments={
"guiGridlist": """The GUI gridlist you wish to toggle the sorting of. """,
"enabled": """A boolean representing whether the sorting is enabled, or disabled. """
},
result='returns true if sorting was successfully toggled., false otherwise.' ,
),
url='guiGridListSetSortingEnabled',
),
field=FunctionOOPField(
name='sortingEnabled',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiGridListSetVerticalScrollPosition",
class_name='Element/GUI/Gridlist|GuiGridList',
method=FunctionData(
signature=FunctionSignature(
name='setVerticalScrollPosition',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='guiGridlist',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='fPosition',
argument_type=FunctionType(
names=['float'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function is used to set the vertical scroll position from a grid list' ,
arguments={
"guiGridlist": """: The grid list you want to set the vertical scroll position from """,
"fPosition": """: A float representing the vertical scroll position (0-100) """
},
result='returns true if the vertical scroll position was set, or false otherwise.' ,
),
url='guiGridListSetVerticalScrollPosition',
),
field=FunctionOOPField(
name='verticalScrollPosition',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiLabelGetColor",
class_name='Element/GUI/Text label|GuiLabel',
method=FunctionData(
signature=FunctionSignature(
name='getColor',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
),
FunctionType(
names=['int'],
is_optional=False,
),
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theLabel',
argument_type=FunctionType(
names=['gui-element'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function gets the color of a label.' ,
arguments={
"theLabel": """The label to get color. """
},
result='returns three int values, representing the amount of red, green, blue if successful. false otherwise.' ,
),
url='guiLabelGetColor',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiLabelGetFontHeight",
class_name='Element/GUI/Text label|GuiLabel',
method=FunctionData(
signature=FunctionSignature(
name='getFontHeight',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['float'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theLabel',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns the height of the font currently used in a GUI text label.' ,
arguments={
"theLabel": """The text label to get the font height from. """
},
result='returns the absolute height of the font currently used in the text label if the function is successful, false otherwise.' ,
),
url='guiLabelGetFontHeight',
),
field=FunctionOOPField(
name='fontHeight',
types=[
FunctionType(
names=['float'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiLabelGetTextExtent",
class_name='Element/GUI/Text label|GuiLabel',
method=FunctionData(
signature=FunctionSignature(
name='getTextExtent',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['float'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theLabel',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns the extent, or width, of the current text inside a GUI text label.' ,
arguments={
"theLabel": """The text label to get the text extent from. """
},
result='returns the absolute width of the current text inside the text label if the function is successful, false otherwise.' ,
),
url='guiLabelGetTextExtent',
),
field=FunctionOOPField(
name='textExtent',
types=[
FunctionType(
names=['float'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiLabelSetColor",
class_name='Element/GUI/Text label|GuiLabel',
method=FunctionData(
signature=FunctionSignature(
name='setColor',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theElement',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='red',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='green',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='blue',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function allows you to set the color of a GUI label.' ,
arguments={
"theElement": """The label to be changed. """,
"red": """An integer specifying the amount of red (0 to 255). """,
"green": """An integer specifying the amount of green (0 to 255). """,
"blue": """An integer specifying the amount of blue (0 to 255). """
},
result='returns true if the the color of the gui label was successfully changed, false otherwise.' ,
),
url='guiLabelSetColor',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiLabelSetHorizontalAlign",
class_name='Element/GUI/Text label|GuiLabel',
method=FunctionData(
signature=FunctionSignature(
name='setHorizontalAlign',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theLabel',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='align',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='wordwrap',
argument_type=FunctionType(
names=['bool'],
is_optional=True,
),
default_value='false',
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function sets the horizontal alignment of a text label.' ,
arguments={
"theLabel": """The text label to set the horizontal alignment on. """,
"align": """The alignment type. Valid type strings are:
**"left"
**"center"
**"right" """,
"wordwrap": """Whether or not to enable wordwrap for the gui-label. """
},
result='returns true on success, false otherwise.' ,
),
url='guiLabelSetHorizontalAlign',
),
field=FunctionOOPField(
name='horizontalAlign',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiLabelSetVerticalAlign",
class_name='Element/GUI/Text label|GuiLabel',
method=FunctionData(
signature=FunctionSignature(
name='setVerticalAlign',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theLabel',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='align',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function sets the vertical alignment of a text label.' ,
arguments={
"theLabel": """The text label to set the vertical alignment on. """,
"align": """The alignment type. Valid type strings are:
**"top"
**"center"
**"bottom" """
},
result='returns true on success, false otherwise.' ,
),
url='guiLabelSetVerticalAlign',
),
field=FunctionOOPField(
name='verticalAlign',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiMemoGetCaretIndex",
class_name='GuiMemo',
method=FunctionData(
signature=FunctionSignature(
name='getCaretIndex',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theElement',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns the caret (the text cursor) position within the memo box.' ,
arguments={
"theElement": """The memo box you want to get the caret position from """
},
result='returns the caret index on success, false otherwise.' ,
),
url='guiMemoGetCaretIndex',
),
field=FunctionOOPField(
name='caretIndex',
types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiMemoGetVerticalScrollPosition",
class_name='GuiMemo',
method=FunctionData(
signature=FunctionSignature(
name='getVerticalScrollPosition',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['float'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theMemo',
argument_type=FunctionType(
names=['gui-memo'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='' ,
arguments={
"theMemo": """: the guiCreateMemo|memo you want to know the vertical scroll position of. """
},
result='returns a float ranging between 0 and 100, or false otherwise.' ,
),
url='guiMemoGetVerticalScrollPosition',
),
field=FunctionOOPField(
name='verticalScrollPosition',
types=[
FunctionType(
names=['float'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiMemoIsReadOnly",
class_name='GuiMemo',
method=FunctionData(
signature=FunctionSignature(
name='isReadOnly',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theMemo',
argument_type=FunctionType(
names=['gui-memo'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='' ,
arguments={
"theMemo": """The memo to check read-only status of. """
},
result='returns true if the memo is read only, false if the memo isnt read only, nil otherwise.' ,
),
url='guiMemoIsReadOnly',
),
field=FunctionOOPField(
name='readOnly',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiMemoSetCaretIndex",
class_name='GuiMemo',
method=FunctionData(
signature=FunctionSignature(
name='setCaretIndex',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theMemo',
argument_type=FunctionType(
names=['gui-memo'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='index',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function sets the current position of the caret (the text cursor) within the memo.' ,
arguments={
"theMemo": """The memo edit box where the caret position is to be changed. """,
"index": """An integer referring to the desired character position within the box. 0 would be before the first character in the box, 1 before the second, etc. """
},
result='returns true if the caret was successfully moved, false otherwise.' ,
),
url='guiMemoSetCaretIndex',
),
field=FunctionOOPField(
name='caretIndex',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiMemoSetReadOnly",
class_name='Element/GUI/Memo_field|GuiMemo',
method=FunctionData(
signature=FunctionSignature(
name='setReadOnly',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theMemo',
argument_type=FunctionType(
names=['gui-memo'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='status',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function allows you to set or remove read-only status for a GUI memo. If read-only is set to true, the contents are not editable.' ,
arguments={
"theMemo": """The memo to change read-only status of. """,
"status": """A boolean value indicating whether read-only is to be enabled or disabled. """
},
result='returns true if the status was successfully changed, false otherwise.' ,
),
url='guiMemoSetReadOnly',
),
field=FunctionOOPField(
name='readOnly',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiMemoSetVerticalScrollPosition",
class_name='GuiMemo',
method=FunctionData(
signature=FunctionSignature(
name='setVerticalScrollPosition',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theMemo',
argument_type=FunctionType(
names=['gui-memo'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='position',
argument_type=FunctionType(
names=['float'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='' ,
arguments={
"theMemo": """: the guiCreateMemo|memo you want to change the vertical scroll position of. """,
"position": """: a float ranging between 0 and 100. """
},
result='returns true if the position was set, false otherwise.' ,
),
url='guiMemoSetVerticalScrollPosition',
),
field=FunctionOOPField(
name='verticalScrollPosition',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiMoveToBack",
class_name='GUI widgets|GuiElement',
method=FunctionData(
signature=FunctionSignature(
name='moveToBack',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='guiElement',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function moves a GUI element to the very back of all other GUI elements.' ,
arguments={
"guiElement": """the GUI element that you want to move to the back """
},
result='returns true if the function was successful, false otherwise.' ,
),
url='guiMoveToBack',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
],
),
CompoundOOPData(
server=[
],
client=[
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiRadioButtonGetSelected",
class_name='Element/GUI/Radio button|GuiRadioButton',
method=FunctionData(
signature=FunctionSignature(
name='getSelected',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='guiRadioButton',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function gets a radio buttons selection state.' ,
arguments={
"guiRadioButton": """The radio button you wish to retrieve the selection state of. """
},
result='returns true if the radio button is selected, false if it is not.' ,
),
url='guiRadioButtonGetSelected',
),
field=FunctionOOPField(
name='selected',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiRadioButtonSetSelected",
class_name='Element/GUI/Radio button|GuiRadioButton',
method=FunctionData(
signature=FunctionSignature(
name='setSelected',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='guiRadioButton',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='state',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function selects or unselects a radio button.' ,
arguments={
"guiRadioButton": """The GUI radio button in which you wish to change the selection state of """,
"state": """The state of the radio button, where true indicates selected, and false indicates unselected. """
},
result='returns true if the radio buttons selection state was successfully set, false otherwise.' ,
),
url='guiRadioButtonSetSelected',
),
field=FunctionOOPField(
name='selected',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
],
),
CompoundOOPData(
server=[
],
client=[
],
),
CompoundOOPData(
server=[
],
client=[
],
),
CompoundOOPData(
server=[
],
client=[
],
),
CompoundOOPData(
server=[
],
client=[
],
),
CompoundOOPData(
server=[
],
client=[
],
),
CompoundOOPData(
server=[
],
client=[
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiSetAlpha",
class_name='GUI widgets|GuiElement',
method=FunctionData(
signature=FunctionSignature(
name='setAlpha',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='guielement',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='alpha',
argument_type=FunctionType(
names=['float'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This changes the alpha level (the visibleness/transparency) of a GUI element' ,
arguments={
"guiElement": """the GUI element whose visibility is to be changed """,
"alpha": """The visibility/transparency of the GUI element. Ranges from 0 (fully transparent) to 1 (fully opaque). Default value is 0.80. """
},
result='returns true if the gui elements alpha was successfully changed, false otherwise.' ,
),
url='guiSetAlpha',
),
field=FunctionOOPField(
name='alpha',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiSetEnabled",
class_name='GUI widgets|GuiElement',
method=FunctionData(
signature=FunctionSignature(
name='setEnabled',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='guiElement',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='enabled',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function enables/disables a GUI element. A disabled GUI element cant be used, gets a gray aspect and doesnt receive any events.' ,
arguments={
"guiElement": """the GUI element you wish to enable or disable """,
"enabled": """the new state """
},
result='if the function succeeds it returns true, if it fails it returns false.' ,
),
url='guiSetEnabled',
),
field=FunctionOOPField(
name='enabled',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiSetFont",
class_name='GUI widgets|GuiElement',
method=FunctionData(
signature=FunctionSignature(
name='setFont',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='guiElement',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='font',
argument_type=FunctionType(
names=['mixed'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function sets the font of a GUI_widgets|GUI element to be used when drawing text.' ,
arguments={
"guiElement": """The GUI element you wish to change the font of """,
"font": """Either a custom GUI font element or the name of a built-in GUI font. See Standard GUI Font Names """
},
result='returns true if the font has been successfully set on the gui element, false otherwise.' ,
),
url='guiSetFont',
),
field=FunctionOOPField(
name='font',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiSetInputEnabled",
class_name='GUI widgets|GuiElement',
method=FunctionData(
signature=FunctionSignature(
name='setInputEnabled',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='enabled',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function enables or disables input focus for the GUI. This means that any keybinds or MTA binds are overidden so that text can be input into an editbox, for example. In other words, keys such as t and y which activate the chatbox are disabled.\nguiSetInputMode can be used as an extended version of guiSetInputEnabled since it provides the same functionality with one added feature.' ,
arguments={
"enabled": """true if input should go to GUI, false if it should go to the game. """
},
result='returns true if input mode could be changed, false if invalid parameters are passed.' ,
),
url='guiSetInputEnabled',
),
field=FunctionOOPField(
name='inputEnabled',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=True,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiSetInputMode",
class_name='GUI widgets|GuiElement',
method=FunctionData(
signature=FunctionSignature(
name='setInputMode',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='mode',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function controls the input mode to define whether or not (and when) keybinds or MTA binds are overridden (disabled) so that text can be input into an editbox, for example.\nThe binds can be either:\n* never disabled (hence using a key such as t in an editbox will still activate the chatbox)\n* always disabled (hence using a key such as t in an editbox will not activate the chatbox)\n* only disabled when actually editing an editbox or a memo (binds are always enabled except when an editbox or memo has input focus)' ,
arguments={
"mode": """a string representing the desired input mode. Accepted values are: """,
"allow_binds": """binds are enabled, hence using a key such as t in an editbox will still activate the chatbox (default) """,
"no_binds": """binds are disabled, hence using a key such as t in an editbox will not activate the chatbox """,
"no_binds_when_editing": """binds are always enabled except when an editable editbox or memo has input focus """
},
result='returns true if input mode could be changed, false if invalid parameters are passed.' ,
),
url='guiSetInputMode',
),
field=FunctionOOPField(
name='inputMode',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=True,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiSetPosition",
class_name='GUI widgets|GuiElement',
method=FunctionData(
signature=FunctionSignature(
name='setPosition',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theElement',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='x',
argument_type=FunctionType(
names=['float'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='y',
argument_type=FunctionType(
names=['float'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='relative',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function sets the position of a GUI element.' ,
arguments={
"theElement": """The GUI element to change position for """,
"x": """Position over the X axis """,
"y": """Position over the Y axis """,
"relative": """Bool that indicates if the x/y positions are relative to the elements parent element. """
},
result='returns true if the position has been successfully set, false otherwise.' ,
),
url='guiSetPosition',
),
field=FunctionOOPField(
name='position',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiSetProperty",
class_name='GUI widgets|GuiElement',
method=FunctionData(
signature=FunctionSignature(
name='setProperty',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='guiElement',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='property',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='value',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function sets the value of a specific CEGUI property of a GUI element. For a list of properties and their meaning, see the http://static.cegui.org.uk/static/WindowsLookProperties.html CEGUI properties page.' ,
arguments={
"guiElement": """the GUI element you wish to get a property of. """,
"property": """the name of of property you want the value of. """,
"value": """the new value for the property. """
},
result='if the function succeeds it returns true, if it fails it returns false.' ,
),
url='guiSetProperty',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiSetSize",
class_name='GUI widgets|GuiElement',
method=FunctionData(
signature=FunctionSignature(
name='setSize',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='guiElement',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='width',
argument_type=FunctionType(
names=['float'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='height',
argument_type=FunctionType(
names=['float'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='relative',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function sets the dimensions (size) of a GUI element. It refers to the bounding box size for GUI elements. It does not make GUI elements smaller or larger in appearance.' ,
arguments={
"guiElement": """the GUI element whose visibility is to be changed """,
"width": """The desired width setting for the gui element """,
"height": """The desired height setting for the gui element """,
"relative": """This is whether sizes and positioning are relative. If this is true, then all x,y,width,height floats must be between 0 and 1, representing sizes relative to the parent. """
},
result='returns true if the gui elements size was set successfully, false otherwise.' ,
),
url='guiSetSize',
),
field=None,
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiSetText",
class_name='GUI widgets|GuiElement',
method=FunctionData(
signature=FunctionSignature(
name='setText',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='guiElement',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='text',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function sets the text of a GUI element.' ,
arguments={
"guiElement": """The GUI element you wish to change the text of """,
"text": """The new text """
},
result='returns true if text has been successfully set on the gui element, false otherwise.' ,
),
url='guiSetText',
),
field=FunctionOOPField(
name='text',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiSetVisible",
class_name='GUI widgets|GuiElement',
method=FunctionData(
signature=FunctionSignature(
name='setVisible',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='guiElement',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='state',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function changes the visibility state of a GUI element.' ,
arguments={
"guiElement": """the GUI element whose visibility is to be changed """,
"state": """the new visibility state """
},
result='returns true if the elements visibility could be changed, false otherwise.' ,
),
url='guiSetVisible',
),
field=FunctionOOPField(
name='visible',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiStaticImageLoadImage",
class_name='Element/GUI/Static_image|GuiStaticImage',
method=FunctionData(
signature=FunctionSignature(
name='loadImage',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theElement',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='filename',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function allows you to change the image in GUI static image element to another one. Tip: If you set other images as children you will have to use setElementCallPropagationEnabled to only affect the parent image.' ,
arguments={
"theElement": """The static image element to be changed. """,
"filename": """A string specifying the filepath of the image file being loaded in current resource. """
},
result='returns true if the the image in the static image element was successfully changed, false otherwise.' ,
),
url='guiStaticImageLoadImage',
),
field=FunctionOOPField(
name='image',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiWindowIsMovable",
class_name='None',
method=None,
field=FunctionOOPField(
name='movable',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=None,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiWindowIsSizable",
class_name='None',
method=None,
field=FunctionOOPField(
name='sizable',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=None,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiWindowSetMovable",
class_name='Element/GUI/Window|GuiWindow',
method=FunctionData(
signature=FunctionSignature(
name='setMovable',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theElement',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='status',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function allows you to specify whether or not a user can move a GUI window.' ,
arguments={
"theElement": """The window to be changed. """,
"status": """A boolean value indicating whether the window is movable or not. """
},
result='returns true if the function is successful, false otherwise.' ,
),
url='guiWindowSetMovable',
),
field=FunctionOOPField(
name='movable',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="guiWindowSetSizable",
class_name='Element/GUI/Window|GuiWindow',
method=FunctionData(
signature=FunctionSignature(
name='setSizable',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theElement',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='status',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function enables or disables user resizing of a GUI window.' ,
arguments={
"theElement": """The window to be changed. """,
"status": """A boolean value indicating whether user resizing is to be enabled or disabled. """
},
result='returns true if the function is successful, false otherwise.' ,
),
url='guiWindowSetSizable',
),
field=FunctionOOPField(
name='sizable',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="isChatBoxInputActive",
class_name='GUI widgets|GuiElement',
method=FunctionData(
signature=FunctionSignature(
name='isChatBoxInputActive',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns whether the ingame chatbox is being used (accepting chatbox input) or not.' ,
arguments={
},
result='returns true if the chatbox is receiving input, false if not active.' ,
),
url='isChatBoxInputActive',
),
field=FunctionOOPField(
name='chatBoxInputActive',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=True,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="isConsoleActive",
class_name='GUI widgets|GuiElement',
method=FunctionData(
signature=FunctionSignature(
name='isConsoleActive',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns whether the ingame console window is visible or not.' ,
arguments={
},
result='returns true if the console is visible, false if not.' ,
),
url='isConsoleActive',
),
field=FunctionOOPField(
name='consoleActive',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=False,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="isDebugViewActive",
class_name='GUI widgets|GuiElement',
method=FunctionData(
signature=FunctionSignature(
name='isDebugViewActive',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns whether the ingame debug window is visible or not. This is the debugwindow visible using the debugscript <level> command.' ,
arguments={
},
result='returns true if the debug view is visible, false if not.' ,
),
url='isDebugViewActive',
),
field=FunctionOOPField(
name='debugViewActive',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=True,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="isMainMenuActive",
class_name='GUI widgets|GuiElement',
method=FunctionData(
signature=FunctionSignature(
name='isMainMenuActive',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns whether the user is in the mainmenu or not.' ,
arguments={
},
result='returns true if the mainmenu is visible, false if not.' ,
),
url='isMainMenuActive',
),
field=FunctionOOPField(
name='mainMenuActive',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=True,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="isMTAWindowActive",
class_name='GUI_widgets|GuiElement',
method=FunctionData(
signature=FunctionSignature(
name='isMTAWindowActive',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns whether any system windows that take focus are active. This includes:\n* Chatbox input\n* Console window\n* Main menu\n* Transferbox\nTo get the status of the debug view, see isDebugViewActive.' ,
arguments={
},
result='returns true if the focus is on the mta window, false if it isnt.' ,
),
url='isMTAWindowActive',
),
field=None,
is_static=True,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="isTransferBoxActive",
class_name='GUI widgets|GuiElement',
method=FunctionData(
signature=FunctionSignature(
name='isTransferBoxActive',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns whether the file downloading dialog box is active or not. This appears when a resource is started and the client doesnt have all the files that resource requires the client to have.\nIts important to note that resources arent started on the client until theyre completely downloaded, so a resource cannot use this function to detect if its own files are downloaded. A client-side resource triggers the onClientResourceStart event when the files it requires are downloaded.' ,
arguments={
},
result='returns true if the file transfer box is visible, false if not.' ,
),
url='isTransferBoxActive',
),
field=None,
is_static=True,
)
],
),
CompoundOOPData(
server=[
],
client=[
FunctionOOP(
description=None,
base_function_name="setDebugViewActive",
class_name='GUI widgets|GuiElement',
method=FunctionData(
signature=FunctionSignature(
name='setDebugViewActive',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='enabled',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function enables or disables the debug window.' ,
arguments={
"enabled": """true if debug window should be visible, false otherwise. """
},
result='returns true, false if invalid parameters are passed.' ,
),
url='setDebugViewActive',
),
field=FunctionOOPField(
name='debugViewActive',
types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
),
is_static=True,
)
],
)
]
| 37.903191
| 631
| 0.345208
| 14,948
| 282,682
| 6.410824
| 0.058336
| 0.064396
| 0.056037
| 0.054472
| 0.822142
| 0.800739
| 0.782206
| 0.756848
| 0.737793
| 0.728589
| 0
| 0.000867
| 0.587837
| 282,682
| 7,457
| 632
| 37.908274
| 0.821622
| 0.00018
| 0
| 0.783396
| 1
| 0.007661
| 0.157786
| 0.018569
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.002229
| 0.000279
| 0
| 0.000279
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
c73caaa61d8831ea391bc175e6e21980a9da2c3e
| 239,089
|
py
|
Python
|
sdk/python/pulumi_azure_native/recoveryservices/v20190615/_inputs.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/recoveryservices/v20190615/_inputs.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/recoveryservices/v20190615/_inputs.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = [
'AzureFileshareProtectedItemArgs',
'AzureFileshareProtectedItemExtendedInfoArgs',
'AzureIaaSClassicComputeVMProtectedItemArgs',
'AzureIaaSComputeVMProtectedItemArgs',
'AzureIaaSVMProtectedItemArgs',
'AzureIaaSVMProtectedItemExtendedInfoArgs',
'AzureSqlProtectedItemArgs',
'AzureSqlProtectedItemExtendedInfoArgs',
'AzureVmWorkloadProtectedItemArgs',
'AzureVmWorkloadProtectedItemExtendedInfoArgs',
'AzureVmWorkloadSAPAseDatabaseProtectedItemArgs',
'AzureVmWorkloadSAPHanaDatabaseProtectedItemArgs',
'AzureVmWorkloadSQLDatabaseProtectedItemArgs',
'DPMProtectedItemArgs',
'DPMProtectedItemExtendedInfoArgs',
'DiskExclusionPropertiesArgs',
'ExtendedPropertiesArgs',
'GenericProtectedItemArgs',
'KPIResourceHealthDetailsArgs',
'MabFileFolderProtectedItemArgs',
'MabFileFolderProtectedItemExtendedInfoArgs',
]
@pulumi.input_type
class AzureFileshareProtectedItemArgs:
    """
    Input properties for an Azure File Share protected (backup) item.

    NOTE(review): this file is generated by the Pulumi SDK Generator — prefer
    changing the generator over hand-editing. The ``@pulumi.input_type``
    decorator wires the property getters/setters below into Pulumi's
    serialization machinery, so the ``pulumi.getter(name=...)`` keys form part
    of the wire contract with the Azure API.
    """
    def __init__(__self__, *,
                 protected_item_type: pulumi.Input[str],
                 backup_management_type: Optional[pulumi.Input[Union[str, 'BackupManagementType']]] = None,
                 backup_set_name: Optional[pulumi.Input[str]] = None,
                 container_name: Optional[pulumi.Input[str]] = None,
                 create_mode: Optional[pulumi.Input[Union[str, 'CreateMode']]] = None,
                 deferred_delete_time_in_utc: Optional[pulumi.Input[str]] = None,
                 deferred_delete_time_remaining: Optional[pulumi.Input[str]] = None,
                 extended_info: Optional[pulumi.Input['AzureFileshareProtectedItemExtendedInfoArgs']] = None,
                 friendly_name: Optional[pulumi.Input[str]] = None,
                 is_deferred_delete_schedule_upcoming: Optional[pulumi.Input[bool]] = None,
                 is_rehydrate: Optional[pulumi.Input[bool]] = None,
                 is_scheduled_for_deferred_delete: Optional[pulumi.Input[bool]] = None,
                 kpis_healths: Optional[pulumi.Input[Mapping[str, pulumi.Input['KPIResourceHealthDetailsArgs']]]] = None,
                 last_backup_status: Optional[pulumi.Input[str]] = None,
                 last_backup_time: Optional[pulumi.Input[str]] = None,
                 last_recovery_point: Optional[pulumi.Input[str]] = None,
                 policy_id: Optional[pulumi.Input[str]] = None,
                 protection_state: Optional[pulumi.Input[Union[str, 'ProtectionState']]] = None,
                 protection_status: Optional[pulumi.Input[str]] = None,
                 source_resource_id: Optional[pulumi.Input[str]] = None,
                 workload_type: Optional[pulumi.Input[Union[str, 'DataSourceType']]] = None):
        """
        Azure File Share workload-specific backup item.
        :param pulumi.Input[str] protected_item_type: backup item type.
               Expected value is 'AzureFileShareProtectedItem'.
        :param pulumi.Input[Union[str, 'BackupManagementType']] backup_management_type: Type of backup management for the backed up item.
        :param pulumi.Input[str] backup_set_name: Name of the backup set the backup item belongs to
        :param pulumi.Input[str] container_name: Unique name of container
        :param pulumi.Input[Union[str, 'CreateMode']] create_mode: Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
        :param pulumi.Input[str] deferred_delete_time_in_utc: Time for deferred deletion in UTC
        :param pulumi.Input[str] deferred_delete_time_remaining: Time remaining before the DS marked for deferred delete is permanently deleted
        :param pulumi.Input['AzureFileshareProtectedItemExtendedInfoArgs'] extended_info: Additional information with this backup item.
        :param pulumi.Input[str] friendly_name: Friendly name of the fileshare represented by this backup item.
        :param pulumi.Input[bool] is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS is to be purged soon
        :param pulumi.Input[bool] is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause state
        :param pulumi.Input[bool] is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for deferred delete
        :param pulumi.Input[Mapping[str, pulumi.Input['KPIResourceHealthDetailsArgs']]] kpis_healths: Health details of different KPIs
        :param pulumi.Input[str] last_backup_status: Last backup operation status. Possible values: Healthy, Unhealthy.
        :param pulumi.Input[str] last_backup_time: Timestamp of the last backup operation on this backup item.
        :param pulumi.Input[str] last_recovery_point: Timestamp when the last (latest) backup copy was created for this backup item.
        :param pulumi.Input[str] policy_id: ID of the backup policy with which this item is backed up.
        :param pulumi.Input[Union[str, 'ProtectionState']] protection_state: Backup state of this backup item.
        :param pulumi.Input[str] protection_status: Backup status of this backup item.
        :param pulumi.Input[str] source_resource_id: ARM ID of the resource to be backed up.
        :param pulumi.Input[Union[str, 'DataSourceType']] workload_type: Type of workload this item represents.
        """
        # The discriminator is always stored as the literal subtype tag;
        # the caller-supplied `protected_item_type` value is not used here.
        pulumi.set(__self__, "protected_item_type", 'AzureFileShareProtectedItem')
        # Optional fields are only set when explicitly provided, so unset
        # fields stay absent from the serialized payload.
        if backup_management_type is not None:
            pulumi.set(__self__, "backup_management_type", backup_management_type)
        if backup_set_name is not None:
            pulumi.set(__self__, "backup_set_name", backup_set_name)
        if container_name is not None:
            pulumi.set(__self__, "container_name", container_name)
        if create_mode is not None:
            pulumi.set(__self__, "create_mode", create_mode)
        if deferred_delete_time_in_utc is not None:
            pulumi.set(__self__, "deferred_delete_time_in_utc", deferred_delete_time_in_utc)
        if deferred_delete_time_remaining is not None:
            pulumi.set(__self__, "deferred_delete_time_remaining", deferred_delete_time_remaining)
        if extended_info is not None:
            pulumi.set(__self__, "extended_info", extended_info)
        if friendly_name is not None:
            pulumi.set(__self__, "friendly_name", friendly_name)
        if is_deferred_delete_schedule_upcoming is not None:
            pulumi.set(__self__, "is_deferred_delete_schedule_upcoming", is_deferred_delete_schedule_upcoming)
        if is_rehydrate is not None:
            pulumi.set(__self__, "is_rehydrate", is_rehydrate)
        if is_scheduled_for_deferred_delete is not None:
            pulumi.set(__self__, "is_scheduled_for_deferred_delete", is_scheduled_for_deferred_delete)
        if kpis_healths is not None:
            pulumi.set(__self__, "kpis_healths", kpis_healths)
        if last_backup_status is not None:
            pulumi.set(__self__, "last_backup_status", last_backup_status)
        if last_backup_time is not None:
            pulumi.set(__self__, "last_backup_time", last_backup_time)
        if last_recovery_point is not None:
            pulumi.set(__self__, "last_recovery_point", last_recovery_point)
        if policy_id is not None:
            pulumi.set(__self__, "policy_id", policy_id)
        if protection_state is not None:
            pulumi.set(__self__, "protection_state", protection_state)
        if protection_status is not None:
            pulumi.set(__self__, "protection_status", protection_status)
        if source_resource_id is not None:
            pulumi.set(__self__, "source_resource_id", source_resource_id)
        if workload_type is not None:
            pulumi.set(__self__, "workload_type", workload_type)

    @property
    @pulumi.getter(name="protectedItemType")
    def protected_item_type(self) -> pulumi.Input[str]:
        """
        backup item type.
        Expected value is 'AzureFileShareProtectedItem'.
        """
        return pulumi.get(self, "protected_item_type")

    @protected_item_type.setter
    def protected_item_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "protected_item_type", value)

    @property
    @pulumi.getter(name="backupManagementType")
    def backup_management_type(self) -> Optional[pulumi.Input[Union[str, 'BackupManagementType']]]:
        """
        Type of backup management for the backed up item.
        """
        return pulumi.get(self, "backup_management_type")

    @backup_management_type.setter
    def backup_management_type(self, value: Optional[pulumi.Input[Union[str, 'BackupManagementType']]]):
        pulumi.set(self, "backup_management_type", value)

    @property
    @pulumi.getter(name="backupSetName")
    def backup_set_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the backup set the backup item belongs to
        """
        return pulumi.get(self, "backup_set_name")

    @backup_set_name.setter
    def backup_set_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "backup_set_name", value)

    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> Optional[pulumi.Input[str]]:
        """
        Unique name of container
        """
        return pulumi.get(self, "container_name")

    @container_name.setter
    def container_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "container_name", value)

    @property
    @pulumi.getter(name="createMode")
    def create_mode(self) -> Optional[pulumi.Input[Union[str, 'CreateMode']]]:
        """
        Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
        """
        return pulumi.get(self, "create_mode")

    @create_mode.setter
    def create_mode(self, value: Optional[pulumi.Input[Union[str, 'CreateMode']]]):
        pulumi.set(self, "create_mode", value)

    @property
    @pulumi.getter(name="deferredDeleteTimeInUTC")
    def deferred_delete_time_in_utc(self) -> Optional[pulumi.Input[str]]:
        """
        Time for deferred deletion in UTC
        """
        return pulumi.get(self, "deferred_delete_time_in_utc")

    @deferred_delete_time_in_utc.setter
    def deferred_delete_time_in_utc(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "deferred_delete_time_in_utc", value)

    @property
    @pulumi.getter(name="deferredDeleteTimeRemaining")
    def deferred_delete_time_remaining(self) -> Optional[pulumi.Input[str]]:
        """
        Time remaining before the DS marked for deferred delete is permanently deleted
        """
        return pulumi.get(self, "deferred_delete_time_remaining")

    @deferred_delete_time_remaining.setter
    def deferred_delete_time_remaining(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "deferred_delete_time_remaining", value)

    @property
    @pulumi.getter(name="extendedInfo")
    def extended_info(self) -> Optional[pulumi.Input['AzureFileshareProtectedItemExtendedInfoArgs']]:
        """
        Additional information with this backup item.
        """
        return pulumi.get(self, "extended_info")

    @extended_info.setter
    def extended_info(self, value: Optional[pulumi.Input['AzureFileshareProtectedItemExtendedInfoArgs']]):
        pulumi.set(self, "extended_info", value)

    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[pulumi.Input[str]]:
        """
        Friendly name of the fileshare represented by this backup item.
        """
        return pulumi.get(self, "friendly_name")

    @friendly_name.setter
    def friendly_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "friendly_name", value)

    @property
    @pulumi.getter(name="isDeferredDeleteScheduleUpcoming")
    def is_deferred_delete_schedule_upcoming(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to identify whether the deferred deleted DS is to be purged soon
        """
        return pulumi.get(self, "is_deferred_delete_schedule_upcoming")

    @is_deferred_delete_schedule_upcoming.setter
    def is_deferred_delete_schedule_upcoming(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_deferred_delete_schedule_upcoming", value)

    @property
    @pulumi.getter(name="isRehydrate")
    def is_rehydrate(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to identify that deferred deleted DS is to be moved into Pause state
        """
        return pulumi.get(self, "is_rehydrate")

    @is_rehydrate.setter
    def is_rehydrate(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_rehydrate", value)

    @property
    @pulumi.getter(name="isScheduledForDeferredDelete")
    def is_scheduled_for_deferred_delete(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to identify whether the DS is scheduled for deferred delete
        """
        return pulumi.get(self, "is_scheduled_for_deferred_delete")

    @is_scheduled_for_deferred_delete.setter
    def is_scheduled_for_deferred_delete(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_scheduled_for_deferred_delete", value)

    @property
    @pulumi.getter(name="kpisHealths")
    def kpis_healths(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['KPIResourceHealthDetailsArgs']]]]:
        """
        Health details of different KPIs
        """
        return pulumi.get(self, "kpis_healths")

    @kpis_healths.setter
    def kpis_healths(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['KPIResourceHealthDetailsArgs']]]]):
        pulumi.set(self, "kpis_healths", value)

    @property
    @pulumi.getter(name="lastBackupStatus")
    def last_backup_status(self) -> Optional[pulumi.Input[str]]:
        """
        Last backup operation status. Possible values: Healthy, Unhealthy.
        """
        return pulumi.get(self, "last_backup_status")

    @last_backup_status.setter
    def last_backup_status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_backup_status", value)

    @property
    @pulumi.getter(name="lastBackupTime")
    def last_backup_time(self) -> Optional[pulumi.Input[str]]:
        """
        Timestamp of the last backup operation on this backup item.
        """
        return pulumi.get(self, "last_backup_time")

    @last_backup_time.setter
    def last_backup_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_backup_time", value)

    @property
    @pulumi.getter(name="lastRecoveryPoint")
    def last_recovery_point(self) -> Optional[pulumi.Input[str]]:
        """
        Timestamp when the last (latest) backup copy was created for this backup item.
        """
        return pulumi.get(self, "last_recovery_point")

    @last_recovery_point.setter
    def last_recovery_point(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_recovery_point", value)

    @property
    @pulumi.getter(name="policyId")
    def policy_id(self) -> Optional[pulumi.Input[str]]:
        """
        ID of the backup policy with which this item is backed up.
        """
        return pulumi.get(self, "policy_id")

    @policy_id.setter
    def policy_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "policy_id", value)

    @property
    @pulumi.getter(name="protectionState")
    def protection_state(self) -> Optional[pulumi.Input[Union[str, 'ProtectionState']]]:
        """
        Backup state of this backup item.
        """
        return pulumi.get(self, "protection_state")

    @protection_state.setter
    def protection_state(self, value: Optional[pulumi.Input[Union[str, 'ProtectionState']]]):
        pulumi.set(self, "protection_state", value)

    @property
    @pulumi.getter(name="protectionStatus")
    def protection_status(self) -> Optional[pulumi.Input[str]]:
        """
        Backup status of this backup item.
        """
        return pulumi.get(self, "protection_status")

    @protection_status.setter
    def protection_status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protection_status", value)

    @property
    @pulumi.getter(name="sourceResourceId")
    def source_resource_id(self) -> Optional[pulumi.Input[str]]:
        """
        ARM ID of the resource to be backed up.
        """
        return pulumi.get(self, "source_resource_id")

    @source_resource_id.setter
    def source_resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "source_resource_id", value)

    @property
    @pulumi.getter(name="workloadType")
    def workload_type(self) -> Optional[pulumi.Input[Union[str, 'DataSourceType']]]:
        """
        Type of workload this item represents.
        """
        return pulumi.get(self, "workload_type")

    @workload_type.setter
    def workload_type(self, value: Optional[pulumi.Input[Union[str, 'DataSourceType']]]):
        pulumi.set(self, "workload_type", value)
@pulumi.input_type
class AzureFileshareProtectedItemExtendedInfoArgs:
    """
    Extended details attached to an Azure File Share backup item: the oldest
    available recovery point, the policy-consistency state, and the number of
    recovery points held by the service.
    """
    def __init__(__self__, *,
                 oldest_recovery_point: Optional[pulumi.Input[str]] = None,
                 policy_state: Optional[pulumi.Input[str]] = None,
                 recovery_point_count: Optional[pulumi.Input[int]] = None):
        """
        Additional information about Azure File Share backup item.
        :param pulumi.Input[str] oldest_recovery_point: The oldest backup copy available for this item in the service.
        :param pulumi.Input[str] policy_state: Indicates consistency of policy object and policy applied to this backup item.
        :param pulumi.Input[int] recovery_point_count: Number of available backup copies associated with this backup item.
        """
        # Forward only explicitly-supplied values so that unset fields stay
        # absent from the serialized input.
        supplied = (
            ("oldest_recovery_point", oldest_recovery_point),
            ("policy_state", policy_state),
            ("recovery_point_count", recovery_point_count),
        )
        for field_name, field_value in supplied:
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter(name="oldestRecoveryPoint")
    def oldest_recovery_point(self) -> Optional[pulumi.Input[str]]:
        """
        The oldest backup copy available for this item in the service.
        """
        return pulumi.get(self, "oldest_recovery_point")

    @oldest_recovery_point.setter
    def oldest_recovery_point(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "oldest_recovery_point", value)

    @property
    @pulumi.getter(name="policyState")
    def policy_state(self) -> Optional[pulumi.Input[str]]:
        """
        Indicates consistency of policy object and policy applied to this backup item.
        """
        return pulumi.get(self, "policy_state")

    @policy_state.setter
    def policy_state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "policy_state", value)

    @property
    @pulumi.getter(name="recoveryPointCount")
    def recovery_point_count(self) -> Optional[pulumi.Input[int]]:
        """
        Number of available backup copies associated with this backup item.
        """
        return pulumi.get(self, "recovery_point_count")

    @recovery_point_count.setter
    def recovery_point_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "recovery_point_count", value)
@pulumi.input_type
class AzureIaaSClassicComputeVMProtectedItemArgs:
    """
    Input properties for a backup item representing a Classic Compute
    (ASM) virtual machine.

    NOTE(review): this file is generated by the Pulumi SDK Generator — prefer
    changing the generator over hand-editing. The ``@pulumi.input_type``
    decorator wires the property getters/setters below into Pulumi's
    serialization machinery, so the ``pulumi.getter(name=...)`` keys form part
    of the wire contract with the Azure API.
    """
    def __init__(__self__, *,
                 protected_item_type: pulumi.Input[str],
                 backup_management_type: Optional[pulumi.Input[Union[str, 'BackupManagementType']]] = None,
                 backup_set_name: Optional[pulumi.Input[str]] = None,
                 container_name: Optional[pulumi.Input[str]] = None,
                 create_mode: Optional[pulumi.Input[Union[str, 'CreateMode']]] = None,
                 deferred_delete_time_in_utc: Optional[pulumi.Input[str]] = None,
                 deferred_delete_time_remaining: Optional[pulumi.Input[str]] = None,
                 extended_info: Optional[pulumi.Input['AzureIaaSVMProtectedItemExtendedInfoArgs']] = None,
                 extended_properties: Optional[pulumi.Input['ExtendedPropertiesArgs']] = None,
                 friendly_name: Optional[pulumi.Input[str]] = None,
                 health_status: Optional[pulumi.Input[Union[str, 'HealthStatus']]] = None,
                 is_deferred_delete_schedule_upcoming: Optional[pulumi.Input[bool]] = None,
                 is_rehydrate: Optional[pulumi.Input[bool]] = None,
                 is_scheduled_for_deferred_delete: Optional[pulumi.Input[bool]] = None,
                 kpis_healths: Optional[pulumi.Input[Mapping[str, pulumi.Input['KPIResourceHealthDetailsArgs']]]] = None,
                 last_backup_status: Optional[pulumi.Input[str]] = None,
                 last_backup_time: Optional[pulumi.Input[str]] = None,
                 last_recovery_point: Optional[pulumi.Input[str]] = None,
                 policy_id: Optional[pulumi.Input[str]] = None,
                 protected_item_data_id: Optional[pulumi.Input[str]] = None,
                 protection_state: Optional[pulumi.Input[Union[str, 'ProtectionState']]] = None,
                 protection_status: Optional[pulumi.Input[str]] = None,
                 source_resource_id: Optional[pulumi.Input[str]] = None,
                 virtual_machine_id: Optional[pulumi.Input[str]] = None,
                 workload_type: Optional[pulumi.Input[Union[str, 'DataSourceType']]] = None):
        """
        IaaS VM workload-specific backup item representing the Classic Compute VM.
        :param pulumi.Input[str] protected_item_type: backup item type.
               Expected value is 'Microsoft.ClassicCompute/virtualMachines'.
        :param pulumi.Input[Union[str, 'BackupManagementType']] backup_management_type: Type of backup management for the backed up item.
        :param pulumi.Input[str] backup_set_name: Name of the backup set the backup item belongs to
        :param pulumi.Input[str] container_name: Unique name of container
        :param pulumi.Input[Union[str, 'CreateMode']] create_mode: Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
        :param pulumi.Input[str] deferred_delete_time_in_utc: Time for deferred deletion in UTC
        :param pulumi.Input[str] deferred_delete_time_remaining: Time remaining before the DS marked for deferred delete is permanently deleted
        :param pulumi.Input['AzureIaaSVMProtectedItemExtendedInfoArgs'] extended_info: Additional information for this backup item.
        :param pulumi.Input['ExtendedPropertiesArgs'] extended_properties: Extended Properties for Azure IaasVM Backup.
        :param pulumi.Input[str] friendly_name: Friendly name of the VM represented by this backup item.
        :param pulumi.Input[Union[str, 'HealthStatus']] health_status: Health status of protected item.
        :param pulumi.Input[bool] is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS is to be purged soon
        :param pulumi.Input[bool] is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause state
        :param pulumi.Input[bool] is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for deferred delete
        :param pulumi.Input[Mapping[str, pulumi.Input['KPIResourceHealthDetailsArgs']]] kpis_healths: Health details of different KPIs
        :param pulumi.Input[str] last_backup_status: Last backup operation status.
        :param pulumi.Input[str] last_backup_time: Timestamp of the last backup operation on this backup item.
        :param pulumi.Input[str] last_recovery_point: Timestamp when the last (latest) backup copy was created for this backup item.
        :param pulumi.Input[str] policy_id: ID of the backup policy with which this item is backed up.
        :param pulumi.Input[str] protected_item_data_id: Data ID of the protected item.
        :param pulumi.Input[Union[str, 'ProtectionState']] protection_state: Backup state of this backup item.
        :param pulumi.Input[str] protection_status: Backup status of this backup item.
        :param pulumi.Input[str] source_resource_id: ARM ID of the resource to be backed up.
        :param pulumi.Input[str] virtual_machine_id: Fully qualified ARM ID of the virtual machine represented by this item.
        :param pulumi.Input[Union[str, 'DataSourceType']] workload_type: Type of workload this item represents.
        """
        # The discriminator is always stored as the literal subtype tag;
        # the caller-supplied `protected_item_type` value is not used here.
        pulumi.set(__self__, "protected_item_type", 'Microsoft.ClassicCompute/virtualMachines')
        # Optional fields are only set when explicitly provided, so unset
        # fields stay absent from the serialized payload.
        if backup_management_type is not None:
            pulumi.set(__self__, "backup_management_type", backup_management_type)
        if backup_set_name is not None:
            pulumi.set(__self__, "backup_set_name", backup_set_name)
        if container_name is not None:
            pulumi.set(__self__, "container_name", container_name)
        if create_mode is not None:
            pulumi.set(__self__, "create_mode", create_mode)
        if deferred_delete_time_in_utc is not None:
            pulumi.set(__self__, "deferred_delete_time_in_utc", deferred_delete_time_in_utc)
        if deferred_delete_time_remaining is not None:
            pulumi.set(__self__, "deferred_delete_time_remaining", deferred_delete_time_remaining)
        if extended_info is not None:
            pulumi.set(__self__, "extended_info", extended_info)
        if extended_properties is not None:
            pulumi.set(__self__, "extended_properties", extended_properties)
        if friendly_name is not None:
            pulumi.set(__self__, "friendly_name", friendly_name)
        if health_status is not None:
            pulumi.set(__self__, "health_status", health_status)
        if is_deferred_delete_schedule_upcoming is not None:
            pulumi.set(__self__, "is_deferred_delete_schedule_upcoming", is_deferred_delete_schedule_upcoming)
        if is_rehydrate is not None:
            pulumi.set(__self__, "is_rehydrate", is_rehydrate)
        if is_scheduled_for_deferred_delete is not None:
            pulumi.set(__self__, "is_scheduled_for_deferred_delete", is_scheduled_for_deferred_delete)
        if kpis_healths is not None:
            pulumi.set(__self__, "kpis_healths", kpis_healths)
        if last_backup_status is not None:
            pulumi.set(__self__, "last_backup_status", last_backup_status)
        if last_backup_time is not None:
            pulumi.set(__self__, "last_backup_time", last_backup_time)
        if last_recovery_point is not None:
            pulumi.set(__self__, "last_recovery_point", last_recovery_point)
        if policy_id is not None:
            pulumi.set(__self__, "policy_id", policy_id)
        if protected_item_data_id is not None:
            pulumi.set(__self__, "protected_item_data_id", protected_item_data_id)
        if protection_state is not None:
            pulumi.set(__self__, "protection_state", protection_state)
        if protection_status is not None:
            pulumi.set(__self__, "protection_status", protection_status)
        if source_resource_id is not None:
            pulumi.set(__self__, "source_resource_id", source_resource_id)
        if virtual_machine_id is not None:
            pulumi.set(__self__, "virtual_machine_id", virtual_machine_id)
        if workload_type is not None:
            pulumi.set(__self__, "workload_type", workload_type)

    @property
    @pulumi.getter(name="protectedItemType")
    def protected_item_type(self) -> pulumi.Input[str]:
        """
        backup item type.
        Expected value is 'Microsoft.ClassicCompute/virtualMachines'.
        """
        return pulumi.get(self, "protected_item_type")

    @protected_item_type.setter
    def protected_item_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "protected_item_type", value)

    @property
    @pulumi.getter(name="backupManagementType")
    def backup_management_type(self) -> Optional[pulumi.Input[Union[str, 'BackupManagementType']]]:
        """
        Type of backup management for the backed up item.
        """
        return pulumi.get(self, "backup_management_type")

    @backup_management_type.setter
    def backup_management_type(self, value: Optional[pulumi.Input[Union[str, 'BackupManagementType']]]):
        pulumi.set(self, "backup_management_type", value)

    @property
    @pulumi.getter(name="backupSetName")
    def backup_set_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the backup set the backup item belongs to
        """
        return pulumi.get(self, "backup_set_name")

    @backup_set_name.setter
    def backup_set_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "backup_set_name", value)

    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> Optional[pulumi.Input[str]]:
        """
        Unique name of container
        """
        return pulumi.get(self, "container_name")

    @container_name.setter
    def container_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "container_name", value)

    @property
    @pulumi.getter(name="createMode")
    def create_mode(self) -> Optional[pulumi.Input[Union[str, 'CreateMode']]]:
        """
        Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
        """
        return pulumi.get(self, "create_mode")

    @create_mode.setter
    def create_mode(self, value: Optional[pulumi.Input[Union[str, 'CreateMode']]]):
        pulumi.set(self, "create_mode", value)

    @property
    @pulumi.getter(name="deferredDeleteTimeInUTC")
    def deferred_delete_time_in_utc(self) -> Optional[pulumi.Input[str]]:
        """
        Time for deferred deletion in UTC
        """
        return pulumi.get(self, "deferred_delete_time_in_utc")

    @deferred_delete_time_in_utc.setter
    def deferred_delete_time_in_utc(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "deferred_delete_time_in_utc", value)

    @property
    @pulumi.getter(name="deferredDeleteTimeRemaining")
    def deferred_delete_time_remaining(self) -> Optional[pulumi.Input[str]]:
        """
        Time remaining before the DS marked for deferred delete is permanently deleted
        """
        return pulumi.get(self, "deferred_delete_time_remaining")

    @deferred_delete_time_remaining.setter
    def deferred_delete_time_remaining(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "deferred_delete_time_remaining", value)

    @property
    @pulumi.getter(name="extendedInfo")
    def extended_info(self) -> Optional[pulumi.Input['AzureIaaSVMProtectedItemExtendedInfoArgs']]:
        """
        Additional information for this backup item.
        """
        return pulumi.get(self, "extended_info")

    @extended_info.setter
    def extended_info(self, value: Optional[pulumi.Input['AzureIaaSVMProtectedItemExtendedInfoArgs']]):
        pulumi.set(self, "extended_info", value)

    @property
    @pulumi.getter(name="extendedProperties")
    def extended_properties(self) -> Optional[pulumi.Input['ExtendedPropertiesArgs']]:
        """
        Extended Properties for Azure IaasVM Backup.
        """
        return pulumi.get(self, "extended_properties")

    @extended_properties.setter
    def extended_properties(self, value: Optional[pulumi.Input['ExtendedPropertiesArgs']]):
        pulumi.set(self, "extended_properties", value)

    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[pulumi.Input[str]]:
        """
        Friendly name of the VM represented by this backup item.
        """
        return pulumi.get(self, "friendly_name")

    @friendly_name.setter
    def friendly_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "friendly_name", value)

    @property
    @pulumi.getter(name="healthStatus")
    def health_status(self) -> Optional[pulumi.Input[Union[str, 'HealthStatus']]]:
        """
        Health status of protected item.
        """
        return pulumi.get(self, "health_status")

    @health_status.setter
    def health_status(self, value: Optional[pulumi.Input[Union[str, 'HealthStatus']]]):
        pulumi.set(self, "health_status", value)

    @property
    @pulumi.getter(name="isDeferredDeleteScheduleUpcoming")
    def is_deferred_delete_schedule_upcoming(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to identify whether the deferred deleted DS is to be purged soon
        """
        return pulumi.get(self, "is_deferred_delete_schedule_upcoming")

    @is_deferred_delete_schedule_upcoming.setter
    def is_deferred_delete_schedule_upcoming(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_deferred_delete_schedule_upcoming", value)

    @property
    @pulumi.getter(name="isRehydrate")
    def is_rehydrate(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to identify that deferred deleted DS is to be moved into Pause state
        """
        return pulumi.get(self, "is_rehydrate")

    @is_rehydrate.setter
    def is_rehydrate(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_rehydrate", value)

    @property
    @pulumi.getter(name="isScheduledForDeferredDelete")
    def is_scheduled_for_deferred_delete(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to identify whether the DS is scheduled for deferred delete
        """
        return pulumi.get(self, "is_scheduled_for_deferred_delete")

    @is_scheduled_for_deferred_delete.setter
    def is_scheduled_for_deferred_delete(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_scheduled_for_deferred_delete", value)

    @property
    @pulumi.getter(name="kpisHealths")
    def kpis_healths(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['KPIResourceHealthDetailsArgs']]]]:
        """
        Health details of different KPIs
        """
        return pulumi.get(self, "kpis_healths")

    @kpis_healths.setter
    def kpis_healths(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['KPIResourceHealthDetailsArgs']]]]):
        pulumi.set(self, "kpis_healths", value)

    @property
    @pulumi.getter(name="lastBackupStatus")
    def last_backup_status(self) -> Optional[pulumi.Input[str]]:
        """
        Last backup operation status.
        """
        return pulumi.get(self, "last_backup_status")

    @last_backup_status.setter
    def last_backup_status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_backup_status", value)

    @property
    @pulumi.getter(name="lastBackupTime")
    def last_backup_time(self) -> Optional[pulumi.Input[str]]:
        """
        Timestamp of the last backup operation on this backup item.
        """
        return pulumi.get(self, "last_backup_time")

    @last_backup_time.setter
    def last_backup_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_backup_time", value)

    @property
    @pulumi.getter(name="lastRecoveryPoint")
    def last_recovery_point(self) -> Optional[pulumi.Input[str]]:
        """
        Timestamp when the last (latest) backup copy was created for this backup item.
        """
        return pulumi.get(self, "last_recovery_point")

    @last_recovery_point.setter
    def last_recovery_point(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_recovery_point", value)

    @property
    @pulumi.getter(name="policyId")
    def policy_id(self) -> Optional[pulumi.Input[str]]:
        """
        ID of the backup policy with which this item is backed up.
        """
        return pulumi.get(self, "policy_id")

    @policy_id.setter
    def policy_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "policy_id", value)

    @property
    @pulumi.getter(name="protectedItemDataId")
    def protected_item_data_id(self) -> Optional[pulumi.Input[str]]:
        """
        Data ID of the protected item.
        """
        return pulumi.get(self, "protected_item_data_id")

    @protected_item_data_id.setter
    def protected_item_data_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protected_item_data_id", value)

    @property
    @pulumi.getter(name="protectionState")
    def protection_state(self) -> Optional[pulumi.Input[Union[str, 'ProtectionState']]]:
        """
        Backup state of this backup item.
        """
        return pulumi.get(self, "protection_state")

    @protection_state.setter
    def protection_state(self, value: Optional[pulumi.Input[Union[str, 'ProtectionState']]]):
        pulumi.set(self, "protection_state", value)

    @property
    @pulumi.getter(name="protectionStatus")
    def protection_status(self) -> Optional[pulumi.Input[str]]:
        """
        Backup status of this backup item.
        """
        return pulumi.get(self, "protection_status")

    @protection_status.setter
    def protection_status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protection_status", value)

    @property
    @pulumi.getter(name="sourceResourceId")
    def source_resource_id(self) -> Optional[pulumi.Input[str]]:
        """
        ARM ID of the resource to be backed up.
        """
        return pulumi.get(self, "source_resource_id")

    @source_resource_id.setter
    def source_resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "source_resource_id", value)

    @property
    @pulumi.getter(name="virtualMachineId")
    def virtual_machine_id(self) -> Optional[pulumi.Input[str]]:
        """
        Fully qualified ARM ID of the virtual machine represented by this item.
        """
        return pulumi.get(self, "virtual_machine_id")

    @virtual_machine_id.setter
    def virtual_machine_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_machine_id", value)

    @property
    @pulumi.getter(name="workloadType")
    def workload_type(self) -> Optional[pulumi.Input[Union[str, 'DataSourceType']]]:
        """
        Type of workload this item represents.
        """
        return pulumi.get(self, "workload_type")

    @workload_type.setter
    def workload_type(self, value: Optional[pulumi.Input[Union[str, 'DataSourceType']]]):
        pulumi.set(self, "workload_type", value)
@pulumi.input_type
class AzureIaaSComputeVMProtectedItemArgs:
    """
    IaaS VM workload-specific backup item representing the Azure Resource Manager VM.

    The ``protectedItemType`` discriminator is pinned to
    'Microsoft.Compute/virtualMachines'; every other field is optional and is
    only recorded when explicitly supplied.
    """

    def __init__(__self__, *,
                 protected_item_type: pulumi.Input[str],
                 backup_management_type: Optional[pulumi.Input[Union[str, 'BackupManagementType']]] = None,
                 backup_set_name: Optional[pulumi.Input[str]] = None,
                 container_name: Optional[pulumi.Input[str]] = None,
                 create_mode: Optional[pulumi.Input[Union[str, 'CreateMode']]] = None,
                 deferred_delete_time_in_utc: Optional[pulumi.Input[str]] = None,
                 deferred_delete_time_remaining: Optional[pulumi.Input[str]] = None,
                 extended_info: Optional[pulumi.Input['AzureIaaSVMProtectedItemExtendedInfoArgs']] = None,
                 extended_properties: Optional[pulumi.Input['ExtendedPropertiesArgs']] = None,
                 friendly_name: Optional[pulumi.Input[str]] = None,
                 health_status: Optional[pulumi.Input[Union[str, 'HealthStatus']]] = None,
                 is_deferred_delete_schedule_upcoming: Optional[pulumi.Input[bool]] = None,
                 is_rehydrate: Optional[pulumi.Input[bool]] = None,
                 is_scheduled_for_deferred_delete: Optional[pulumi.Input[bool]] = None,
                 kpis_healths: Optional[pulumi.Input[Mapping[str, pulumi.Input['KPIResourceHealthDetailsArgs']]]] = None,
                 last_backup_status: Optional[pulumi.Input[str]] = None,
                 last_backup_time: Optional[pulumi.Input[str]] = None,
                 last_recovery_point: Optional[pulumi.Input[str]] = None,
                 policy_id: Optional[pulumi.Input[str]] = None,
                 protected_item_data_id: Optional[pulumi.Input[str]] = None,
                 protection_state: Optional[pulumi.Input[Union[str, 'ProtectionState']]] = None,
                 protection_status: Optional[pulumi.Input[str]] = None,
                 source_resource_id: Optional[pulumi.Input[str]] = None,
                 virtual_machine_id: Optional[pulumi.Input[str]] = None,
                 workload_type: Optional[pulumi.Input[Union[str, 'DataSourceType']]] = None):
        """
        IaaS VM workload-specific backup item representing the Azure Resource Manager VM.
        :param pulumi.Input[str] protected_item_type: backup item type.
               Expected value is 'Microsoft.Compute/virtualMachines'.
        :param pulumi.Input[Union[str, 'BackupManagementType']] backup_management_type: Type of backup management for the backed up item.
        :param pulumi.Input[str] backup_set_name: Name of the backup set the backup item belongs to
        :param pulumi.Input[str] container_name: Unique name of container
        :param pulumi.Input[Union[str, 'CreateMode']] create_mode: Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
        :param pulumi.Input[str] deferred_delete_time_in_utc: Time for deferred deletion in UTC
        :param pulumi.Input[str] deferred_delete_time_remaining: Time remaining before the DS marked for deferred delete is permanently deleted
        :param pulumi.Input['AzureIaaSVMProtectedItemExtendedInfoArgs'] extended_info: Additional information for this backup item.
        :param pulumi.Input['ExtendedPropertiesArgs'] extended_properties: Extended Properties for Azure IaasVM Backup.
        :param pulumi.Input[str] friendly_name: Friendly name of the VM represented by this backup item.
        :param pulumi.Input[Union[str, 'HealthStatus']] health_status: Health status of protected item.
        :param pulumi.Input[bool] is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS is to be purged soon
        :param pulumi.Input[bool] is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause state
        :param pulumi.Input[bool] is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for deferred delete
        :param pulumi.Input[Mapping[str, pulumi.Input['KPIResourceHealthDetailsArgs']]] kpis_healths: Health details of different KPIs
        :param pulumi.Input[str] last_backup_status: Last backup operation status.
        :param pulumi.Input[str] last_backup_time: Timestamp of the last backup operation on this backup item.
        :param pulumi.Input[str] last_recovery_point: Timestamp when the last (latest) backup copy was created for this backup item.
        :param pulumi.Input[str] policy_id: ID of the backup policy with which this item is backed up.
        :param pulumi.Input[str] protected_item_data_id: Data ID of the protected item.
        :param pulumi.Input[Union[str, 'ProtectionState']] protection_state: Backup state of this backup item.
        :param pulumi.Input[str] protection_status: Backup status of this backup item.
        :param pulumi.Input[str] source_resource_id: ARM ID of the resource to be backed up.
        :param pulumi.Input[str] virtual_machine_id: Fully qualified ARM ID of the virtual machine represented by this item.
        :param pulumi.Input[Union[str, 'DataSourceType']] workload_type: Type of workload this item represents.
        """
        # The discriminator is fixed for this variant regardless of the value passed in.
        pulumi.set(__self__, "protected_item_type", 'Microsoft.Compute/virtualMachines')
        # Record only the optional arguments that were explicitly supplied,
        # preserving the original assignment order.
        _optionals = {
            "backup_management_type": backup_management_type,
            "backup_set_name": backup_set_name,
            "container_name": container_name,
            "create_mode": create_mode,
            "deferred_delete_time_in_utc": deferred_delete_time_in_utc,
            "deferred_delete_time_remaining": deferred_delete_time_remaining,
            "extended_info": extended_info,
            "extended_properties": extended_properties,
            "friendly_name": friendly_name,
            "health_status": health_status,
            "is_deferred_delete_schedule_upcoming": is_deferred_delete_schedule_upcoming,
            "is_rehydrate": is_rehydrate,
            "is_scheduled_for_deferred_delete": is_scheduled_for_deferred_delete,
            "kpis_healths": kpis_healths,
            "last_backup_status": last_backup_status,
            "last_backup_time": last_backup_time,
            "last_recovery_point": last_recovery_point,
            "policy_id": policy_id,
            "protected_item_data_id": protected_item_data_id,
            "protection_state": protection_state,
            "protection_status": protection_status,
            "source_resource_id": source_resource_id,
            "virtual_machine_id": virtual_machine_id,
            "workload_type": workload_type,
        }
        for _key, _arg in _optionals.items():
            if _arg is not None:
                pulumi.set(__self__, _key, _arg)

    @property
    @pulumi.getter(name="protectedItemType")
    def protected_item_type(self) -> pulumi.Input[str]:
        """backup item type. Expected value is 'Microsoft.Compute/virtualMachines'."""
        return pulumi.get(self, "protected_item_type")

    @protected_item_type.setter
    def protected_item_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "protected_item_type", value)

    @property
    @pulumi.getter(name="backupManagementType")
    def backup_management_type(self) -> Optional[pulumi.Input[Union[str, 'BackupManagementType']]]:
        """Type of backup management for the backed up item."""
        return pulumi.get(self, "backup_management_type")

    @backup_management_type.setter
    def backup_management_type(self, value: Optional[pulumi.Input[Union[str, 'BackupManagementType']]]):
        pulumi.set(self, "backup_management_type", value)

    @property
    @pulumi.getter(name="backupSetName")
    def backup_set_name(self) -> Optional[pulumi.Input[str]]:
        """Name of the backup set the backup item belongs to"""
        return pulumi.get(self, "backup_set_name")

    @backup_set_name.setter
    def backup_set_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "backup_set_name", value)

    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> Optional[pulumi.Input[str]]:
        """Unique name of container"""
        return pulumi.get(self, "container_name")

    @container_name.setter
    def container_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "container_name", value)

    @property
    @pulumi.getter(name="createMode")
    def create_mode(self) -> Optional[pulumi.Input[Union[str, 'CreateMode']]]:
        """Create mode to indicate recovery of existing soft deleted data source or creation of new data source."""
        return pulumi.get(self, "create_mode")

    @create_mode.setter
    def create_mode(self, value: Optional[pulumi.Input[Union[str, 'CreateMode']]]):
        pulumi.set(self, "create_mode", value)

    @property
    @pulumi.getter(name="deferredDeleteTimeInUTC")
    def deferred_delete_time_in_utc(self) -> Optional[pulumi.Input[str]]:
        """Time for deferred deletion in UTC"""
        return pulumi.get(self, "deferred_delete_time_in_utc")

    @deferred_delete_time_in_utc.setter
    def deferred_delete_time_in_utc(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "deferred_delete_time_in_utc", value)

    @property
    @pulumi.getter(name="deferredDeleteTimeRemaining")
    def deferred_delete_time_remaining(self) -> Optional[pulumi.Input[str]]:
        """Time remaining before the DS marked for deferred delete is permanently deleted"""
        return pulumi.get(self, "deferred_delete_time_remaining")

    @deferred_delete_time_remaining.setter
    def deferred_delete_time_remaining(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "deferred_delete_time_remaining", value)

    @property
    @pulumi.getter(name="extendedInfo")
    def extended_info(self) -> Optional[pulumi.Input['AzureIaaSVMProtectedItemExtendedInfoArgs']]:
        """Additional information for this backup item."""
        return pulumi.get(self, "extended_info")

    @extended_info.setter
    def extended_info(self, value: Optional[pulumi.Input['AzureIaaSVMProtectedItemExtendedInfoArgs']]):
        pulumi.set(self, "extended_info", value)

    @property
    @pulumi.getter(name="extendedProperties")
    def extended_properties(self) -> Optional[pulumi.Input['ExtendedPropertiesArgs']]:
        """Extended Properties for Azure IaasVM Backup."""
        return pulumi.get(self, "extended_properties")

    @extended_properties.setter
    def extended_properties(self, value: Optional[pulumi.Input['ExtendedPropertiesArgs']]):
        pulumi.set(self, "extended_properties", value)

    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[pulumi.Input[str]]:
        """Friendly name of the VM represented by this backup item."""
        return pulumi.get(self, "friendly_name")

    @friendly_name.setter
    def friendly_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "friendly_name", value)

    @property
    @pulumi.getter(name="healthStatus")
    def health_status(self) -> Optional[pulumi.Input[Union[str, 'HealthStatus']]]:
        """Health status of protected item."""
        return pulumi.get(self, "health_status")

    @health_status.setter
    def health_status(self, value: Optional[pulumi.Input[Union[str, 'HealthStatus']]]):
        pulumi.set(self, "health_status", value)

    @property
    @pulumi.getter(name="isDeferredDeleteScheduleUpcoming")
    def is_deferred_delete_schedule_upcoming(self) -> Optional[pulumi.Input[bool]]:
        """Flag to identify whether the deferred deleted DS is to be purged soon"""
        return pulumi.get(self, "is_deferred_delete_schedule_upcoming")

    @is_deferred_delete_schedule_upcoming.setter
    def is_deferred_delete_schedule_upcoming(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_deferred_delete_schedule_upcoming", value)

    @property
    @pulumi.getter(name="isRehydrate")
    def is_rehydrate(self) -> Optional[pulumi.Input[bool]]:
        """Flag to identify that deferred deleted DS is to be moved into Pause state"""
        return pulumi.get(self, "is_rehydrate")

    @is_rehydrate.setter
    def is_rehydrate(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_rehydrate", value)

    @property
    @pulumi.getter(name="isScheduledForDeferredDelete")
    def is_scheduled_for_deferred_delete(self) -> Optional[pulumi.Input[bool]]:
        """Flag to identify whether the DS is scheduled for deferred delete"""
        return pulumi.get(self, "is_scheduled_for_deferred_delete")

    @is_scheduled_for_deferred_delete.setter
    def is_scheduled_for_deferred_delete(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_scheduled_for_deferred_delete", value)

    @property
    @pulumi.getter(name="kpisHealths")
    def kpis_healths(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['KPIResourceHealthDetailsArgs']]]]:
        """Health details of different KPIs"""
        return pulumi.get(self, "kpis_healths")

    @kpis_healths.setter
    def kpis_healths(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['KPIResourceHealthDetailsArgs']]]]):
        pulumi.set(self, "kpis_healths", value)

    @property
    @pulumi.getter(name="lastBackupStatus")
    def last_backup_status(self) -> Optional[pulumi.Input[str]]:
        """Last backup operation status."""
        return pulumi.get(self, "last_backup_status")

    @last_backup_status.setter
    def last_backup_status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_backup_status", value)

    @property
    @pulumi.getter(name="lastBackupTime")
    def last_backup_time(self) -> Optional[pulumi.Input[str]]:
        """Timestamp of the last backup operation on this backup item."""
        return pulumi.get(self, "last_backup_time")

    @last_backup_time.setter
    def last_backup_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_backup_time", value)

    @property
    @pulumi.getter(name="lastRecoveryPoint")
    def last_recovery_point(self) -> Optional[pulumi.Input[str]]:
        """Timestamp when the last (latest) backup copy was created for this backup item."""
        return pulumi.get(self, "last_recovery_point")

    @last_recovery_point.setter
    def last_recovery_point(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_recovery_point", value)

    @property
    @pulumi.getter(name="policyId")
    def policy_id(self) -> Optional[pulumi.Input[str]]:
        """ID of the backup policy with which this item is backed up."""
        return pulumi.get(self, "policy_id")

    @policy_id.setter
    def policy_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "policy_id", value)

    @property
    @pulumi.getter(name="protectedItemDataId")
    def protected_item_data_id(self) -> Optional[pulumi.Input[str]]:
        """Data ID of the protected item."""
        return pulumi.get(self, "protected_item_data_id")

    @protected_item_data_id.setter
    def protected_item_data_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protected_item_data_id", value)

    @property
    @pulumi.getter(name="protectionState")
    def protection_state(self) -> Optional[pulumi.Input[Union[str, 'ProtectionState']]]:
        """Backup state of this backup item."""
        return pulumi.get(self, "protection_state")

    @protection_state.setter
    def protection_state(self, value: Optional[pulumi.Input[Union[str, 'ProtectionState']]]):
        pulumi.set(self, "protection_state", value)

    @property
    @pulumi.getter(name="protectionStatus")
    def protection_status(self) -> Optional[pulumi.Input[str]]:
        """Backup status of this backup item."""
        return pulumi.get(self, "protection_status")

    @protection_status.setter
    def protection_status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protection_status", value)

    @property
    @pulumi.getter(name="sourceResourceId")
    def source_resource_id(self) -> Optional[pulumi.Input[str]]:
        """ARM ID of the resource to be backed up."""
        return pulumi.get(self, "source_resource_id")

    @source_resource_id.setter
    def source_resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "source_resource_id", value)

    @property
    @pulumi.getter(name="virtualMachineId")
    def virtual_machine_id(self) -> Optional[pulumi.Input[str]]:
        """Fully qualified ARM ID of the virtual machine represented by this item."""
        return pulumi.get(self, "virtual_machine_id")

    @virtual_machine_id.setter
    def virtual_machine_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_machine_id", value)

    @property
    @pulumi.getter(name="workloadType")
    def workload_type(self) -> Optional[pulumi.Input[Union[str, 'DataSourceType']]]:
        """Type of workload this item represents."""
        return pulumi.get(self, "workload_type")

    @workload_type.setter
    def workload_type(self, value: Optional[pulumi.Input[Union[str, 'DataSourceType']]]):
        pulumi.set(self, "workload_type", value)
@pulumi.input_type
class AzureIaaSVMProtectedItemArgs:
    """
    IaaS VM workload-specific backup item.

    The ``protectedItemType`` discriminator is pinned to
    'AzureIaaSVMProtectedItem'; every other field is optional and is only
    recorded when explicitly supplied.
    """

    def __init__(__self__, *,
                 protected_item_type: pulumi.Input[str],
                 backup_management_type: Optional[pulumi.Input[Union[str, 'BackupManagementType']]] = None,
                 backup_set_name: Optional[pulumi.Input[str]] = None,
                 container_name: Optional[pulumi.Input[str]] = None,
                 create_mode: Optional[pulumi.Input[Union[str, 'CreateMode']]] = None,
                 deferred_delete_time_in_utc: Optional[pulumi.Input[str]] = None,
                 deferred_delete_time_remaining: Optional[pulumi.Input[str]] = None,
                 extended_info: Optional[pulumi.Input['AzureIaaSVMProtectedItemExtendedInfoArgs']] = None,
                 extended_properties: Optional[pulumi.Input['ExtendedPropertiesArgs']] = None,
                 friendly_name: Optional[pulumi.Input[str]] = None,
                 health_status: Optional[pulumi.Input[Union[str, 'HealthStatus']]] = None,
                 is_deferred_delete_schedule_upcoming: Optional[pulumi.Input[bool]] = None,
                 is_rehydrate: Optional[pulumi.Input[bool]] = None,
                 is_scheduled_for_deferred_delete: Optional[pulumi.Input[bool]] = None,
                 kpis_healths: Optional[pulumi.Input[Mapping[str, pulumi.Input['KPIResourceHealthDetailsArgs']]]] = None,
                 last_backup_status: Optional[pulumi.Input[str]] = None,
                 last_backup_time: Optional[pulumi.Input[str]] = None,
                 last_recovery_point: Optional[pulumi.Input[str]] = None,
                 policy_id: Optional[pulumi.Input[str]] = None,
                 protected_item_data_id: Optional[pulumi.Input[str]] = None,
                 protection_state: Optional[pulumi.Input[Union[str, 'ProtectionState']]] = None,
                 protection_status: Optional[pulumi.Input[str]] = None,
                 source_resource_id: Optional[pulumi.Input[str]] = None,
                 virtual_machine_id: Optional[pulumi.Input[str]] = None,
                 workload_type: Optional[pulumi.Input[Union[str, 'DataSourceType']]] = None):
        """
        IaaS VM workload-specific backup item.
        :param pulumi.Input[str] protected_item_type: backup item type.
               Expected value is 'AzureIaaSVMProtectedItem'.
        :param pulumi.Input[Union[str, 'BackupManagementType']] backup_management_type: Type of backup management for the backed up item.
        :param pulumi.Input[str] backup_set_name: Name of the backup set the backup item belongs to
        :param pulumi.Input[str] container_name: Unique name of container
        :param pulumi.Input[Union[str, 'CreateMode']] create_mode: Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
        :param pulumi.Input[str] deferred_delete_time_in_utc: Time for deferred deletion in UTC
        :param pulumi.Input[str] deferred_delete_time_remaining: Time remaining before the DS marked for deferred delete is permanently deleted
        :param pulumi.Input['AzureIaaSVMProtectedItemExtendedInfoArgs'] extended_info: Additional information for this backup item.
        :param pulumi.Input['ExtendedPropertiesArgs'] extended_properties: Extended Properties for Azure IaasVM Backup.
        :param pulumi.Input[str] friendly_name: Friendly name of the VM represented by this backup item.
        :param pulumi.Input[Union[str, 'HealthStatus']] health_status: Health status of protected item.
        :param pulumi.Input[bool] is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS is to be purged soon
        :param pulumi.Input[bool] is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause state
        :param pulumi.Input[bool] is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for deferred delete
        :param pulumi.Input[Mapping[str, pulumi.Input['KPIResourceHealthDetailsArgs']]] kpis_healths: Health details of different KPIs
        :param pulumi.Input[str] last_backup_status: Last backup operation status.
        :param pulumi.Input[str] last_backup_time: Timestamp of the last backup operation on this backup item.
        :param pulumi.Input[str] last_recovery_point: Timestamp when the last (latest) backup copy was created for this backup item.
        :param pulumi.Input[str] policy_id: ID of the backup policy with which this item is backed up.
        :param pulumi.Input[str] protected_item_data_id: Data ID of the protected item.
        :param pulumi.Input[Union[str, 'ProtectionState']] protection_state: Backup state of this backup item.
        :param pulumi.Input[str] protection_status: Backup status of this backup item.
        :param pulumi.Input[str] source_resource_id: ARM ID of the resource to be backed up.
        :param pulumi.Input[str] virtual_machine_id: Fully qualified ARM ID of the virtual machine represented by this item.
        :param pulumi.Input[Union[str, 'DataSourceType']] workload_type: Type of workload this item represents.
        """
        # The discriminator is fixed for this variant regardless of the value passed in.
        pulumi.set(__self__, "protected_item_type", 'AzureIaaSVMProtectedItem')
        # Record only the optional arguments that were explicitly supplied,
        # preserving the original assignment order.
        _optionals = {
            "backup_management_type": backup_management_type,
            "backup_set_name": backup_set_name,
            "container_name": container_name,
            "create_mode": create_mode,
            "deferred_delete_time_in_utc": deferred_delete_time_in_utc,
            "deferred_delete_time_remaining": deferred_delete_time_remaining,
            "extended_info": extended_info,
            "extended_properties": extended_properties,
            "friendly_name": friendly_name,
            "health_status": health_status,
            "is_deferred_delete_schedule_upcoming": is_deferred_delete_schedule_upcoming,
            "is_rehydrate": is_rehydrate,
            "is_scheduled_for_deferred_delete": is_scheduled_for_deferred_delete,
            "kpis_healths": kpis_healths,
            "last_backup_status": last_backup_status,
            "last_backup_time": last_backup_time,
            "last_recovery_point": last_recovery_point,
            "policy_id": policy_id,
            "protected_item_data_id": protected_item_data_id,
            "protection_state": protection_state,
            "protection_status": protection_status,
            "source_resource_id": source_resource_id,
            "virtual_machine_id": virtual_machine_id,
            "workload_type": workload_type,
        }
        for _key, _arg in _optionals.items():
            if _arg is not None:
                pulumi.set(__self__, _key, _arg)

    @property
    @pulumi.getter(name="protectedItemType")
    def protected_item_type(self) -> pulumi.Input[str]:
        """backup item type. Expected value is 'AzureIaaSVMProtectedItem'."""
        return pulumi.get(self, "protected_item_type")

    @protected_item_type.setter
    def protected_item_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "protected_item_type", value)

    @property
    @pulumi.getter(name="backupManagementType")
    def backup_management_type(self) -> Optional[pulumi.Input[Union[str, 'BackupManagementType']]]:
        """Type of backup management for the backed up item."""
        return pulumi.get(self, "backup_management_type")

    @backup_management_type.setter
    def backup_management_type(self, value: Optional[pulumi.Input[Union[str, 'BackupManagementType']]]):
        pulumi.set(self, "backup_management_type", value)

    @property
    @pulumi.getter(name="backupSetName")
    def backup_set_name(self) -> Optional[pulumi.Input[str]]:
        """Name of the backup set the backup item belongs to"""
        return pulumi.get(self, "backup_set_name")

    @backup_set_name.setter
    def backup_set_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "backup_set_name", value)

    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> Optional[pulumi.Input[str]]:
        """Unique name of container"""
        return pulumi.get(self, "container_name")

    @container_name.setter
    def container_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "container_name", value)

    @property
    @pulumi.getter(name="createMode")
    def create_mode(self) -> Optional[pulumi.Input[Union[str, 'CreateMode']]]:
        """Create mode to indicate recovery of existing soft deleted data source or creation of new data source."""
        return pulumi.get(self, "create_mode")

    @create_mode.setter
    def create_mode(self, value: Optional[pulumi.Input[Union[str, 'CreateMode']]]):
        pulumi.set(self, "create_mode", value)

    @property
    @pulumi.getter(name="deferredDeleteTimeInUTC")
    def deferred_delete_time_in_utc(self) -> Optional[pulumi.Input[str]]:
        """Time for deferred deletion in UTC"""
        return pulumi.get(self, "deferred_delete_time_in_utc")

    @deferred_delete_time_in_utc.setter
    def deferred_delete_time_in_utc(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "deferred_delete_time_in_utc", value)

    @property
    @pulumi.getter(name="deferredDeleteTimeRemaining")
    def deferred_delete_time_remaining(self) -> Optional[pulumi.Input[str]]:
        """Time remaining before the DS marked for deferred delete is permanently deleted"""
        return pulumi.get(self, "deferred_delete_time_remaining")

    @deferred_delete_time_remaining.setter
    def deferred_delete_time_remaining(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "deferred_delete_time_remaining", value)

    @property
    @pulumi.getter(name="extendedInfo")
    def extended_info(self) -> Optional[pulumi.Input['AzureIaaSVMProtectedItemExtendedInfoArgs']]:
        """Additional information for this backup item."""
        return pulumi.get(self, "extended_info")

    @extended_info.setter
    def extended_info(self, value: Optional[pulumi.Input['AzureIaaSVMProtectedItemExtendedInfoArgs']]):
        pulumi.set(self, "extended_info", value)

    @property
    @pulumi.getter(name="extendedProperties")
    def extended_properties(self) -> Optional[pulumi.Input['ExtendedPropertiesArgs']]:
        """Extended Properties for Azure IaasVM Backup."""
        return pulumi.get(self, "extended_properties")

    @extended_properties.setter
    def extended_properties(self, value: Optional[pulumi.Input['ExtendedPropertiesArgs']]):
        pulumi.set(self, "extended_properties", value)

    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[pulumi.Input[str]]:
        """Friendly name of the VM represented by this backup item."""
        return pulumi.get(self, "friendly_name")

    @friendly_name.setter
    def friendly_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "friendly_name", value)

    @property
    @pulumi.getter(name="healthStatus")
    def health_status(self) -> Optional[pulumi.Input[Union[str, 'HealthStatus']]]:
        """Health status of protected item."""
        return pulumi.get(self, "health_status")

    @health_status.setter
    def health_status(self, value: Optional[pulumi.Input[Union[str, 'HealthStatus']]]):
        pulumi.set(self, "health_status", value)

    @property
    @pulumi.getter(name="isDeferredDeleteScheduleUpcoming")
    def is_deferred_delete_schedule_upcoming(self) -> Optional[pulumi.Input[bool]]:
        """Flag to identify whether the deferred deleted DS is to be purged soon"""
        return pulumi.get(self, "is_deferred_delete_schedule_upcoming")

    @is_deferred_delete_schedule_upcoming.setter
    def is_deferred_delete_schedule_upcoming(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_deferred_delete_schedule_upcoming", value)

    @property
    @pulumi.getter(name="isRehydrate")
    def is_rehydrate(self) -> Optional[pulumi.Input[bool]]:
        """Flag to identify that deferred deleted DS is to be moved into Pause state"""
        return pulumi.get(self, "is_rehydrate")

    @is_rehydrate.setter
    def is_rehydrate(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_rehydrate", value)

    @property
    @pulumi.getter(name="isScheduledForDeferredDelete")
    def is_scheduled_for_deferred_delete(self) -> Optional[pulumi.Input[bool]]:
        """Flag to identify whether the DS is scheduled for deferred delete"""
        return pulumi.get(self, "is_scheduled_for_deferred_delete")

    @is_scheduled_for_deferred_delete.setter
    def is_scheduled_for_deferred_delete(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_scheduled_for_deferred_delete", value)

    @property
    @pulumi.getter(name="kpisHealths")
    def kpis_healths(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['KPIResourceHealthDetailsArgs']]]]:
        """Health details of different KPIs"""
        return pulumi.get(self, "kpis_healths")

    @kpis_healths.setter
    def kpis_healths(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['KPIResourceHealthDetailsArgs']]]]):
        pulumi.set(self, "kpis_healths", value)

    @property
    @pulumi.getter(name="lastBackupStatus")
    def last_backup_status(self) -> Optional[pulumi.Input[str]]:
        """Last backup operation status."""
        return pulumi.get(self, "last_backup_status")

    @last_backup_status.setter
    def last_backup_status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_backup_status", value)

    @property
    @pulumi.getter(name="lastBackupTime")
    def last_backup_time(self) -> Optional[pulumi.Input[str]]:
        """Timestamp of the last backup operation on this backup item."""
        return pulumi.get(self, "last_backup_time")

    @last_backup_time.setter
    def last_backup_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_backup_time", value)

    @property
    @pulumi.getter(name="lastRecoveryPoint")
    def last_recovery_point(self) -> Optional[pulumi.Input[str]]:
        """Timestamp when the last (latest) backup copy was created for this backup item."""
        return pulumi.get(self, "last_recovery_point")

    @last_recovery_point.setter
    def last_recovery_point(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_recovery_point", value)

    @property
    @pulumi.getter(name="policyId")
    def policy_id(self) -> Optional[pulumi.Input[str]]:
        """ID of the backup policy with which this item is backed up."""
        return pulumi.get(self, "policy_id")

    @policy_id.setter
    def policy_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "policy_id", value)

    @property
    @pulumi.getter(name="protectedItemDataId")
    def protected_item_data_id(self) -> Optional[pulumi.Input[str]]:
        """Data ID of the protected item."""
        return pulumi.get(self, "protected_item_data_id")

    @protected_item_data_id.setter
    def protected_item_data_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protected_item_data_id", value)

    @property
    @pulumi.getter(name="protectionState")
    def protection_state(self) -> Optional[pulumi.Input[Union[str, 'ProtectionState']]]:
        """Backup state of this backup item."""
        return pulumi.get(self, "protection_state")

    @protection_state.setter
    def protection_state(self, value: Optional[pulumi.Input[Union[str, 'ProtectionState']]]):
        pulumi.set(self, "protection_state", value)

    @property
    @pulumi.getter(name="protectionStatus")
    def protection_status(self) -> Optional[pulumi.Input[str]]:
        """Backup status of this backup item."""
        return pulumi.get(self, "protection_status")

    @protection_status.setter
    def protection_status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protection_status", value)

    @property
    @pulumi.getter(name="sourceResourceId")
    def source_resource_id(self) -> Optional[pulumi.Input[str]]:
        """ARM ID of the resource to be backed up."""
        return pulumi.get(self, "source_resource_id")

    @source_resource_id.setter
    def source_resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "source_resource_id", value)

    @property
    @pulumi.getter(name="virtualMachineId")
    def virtual_machine_id(self) -> Optional[pulumi.Input[str]]:
        """Fully qualified ARM ID of the virtual machine represented by this item."""
        return pulumi.get(self, "virtual_machine_id")

    @virtual_machine_id.setter
    def virtual_machine_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_machine_id", value)

    @property
    @pulumi.getter(name="workloadType")
    def workload_type(self) -> Optional[pulumi.Input[Union[str, 'DataSourceType']]]:
        """Type of workload this item represents."""
        return pulumi.get(self, "workload_type")

    @workload_type.setter
    def workload_type(self, value: Optional[pulumi.Input[Union[str, 'DataSourceType']]]):
        pulumi.set(self, "workload_type", value)
@pulumi.input_type
class AzureIaaSVMProtectedItemExtendedInfoArgs:
    def __init__(__self__, *,
                 oldest_recovery_point: Optional[pulumi.Input[str]] = None,
                 policy_inconsistent: Optional[pulumi.Input[bool]] = None,
                 recovery_point_count: Optional[pulumi.Input[int]] = None):
        """
        Extra details attached to an Azure IaaS VM specific backup item.

        :param pulumi.Input[str] oldest_recovery_point: The oldest backup copy still available for this backup item.
        :param pulumi.Input[bool] policy_inconsistent: Whether the backup policy tied to this backup item is inconsistent.
        :param pulumi.Input[int] recovery_point_count: How many backup copies exist for this backup item.
        """
        # Persist only the arguments the caller actually supplied.
        for key, arg in (
                ("oldest_recovery_point", oldest_recovery_point),
                ("policy_inconsistent", policy_inconsistent),
                ("recovery_point_count", recovery_point_count)):
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter(name="oldestRecoveryPoint")
    def oldest_recovery_point(self) -> Optional[pulumi.Input[str]]:
        """The oldest backup copy still available for this backup item."""
        return pulumi.get(self, "oldest_recovery_point")

    @oldest_recovery_point.setter
    def oldest_recovery_point(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "oldest_recovery_point", value)

    @property
    @pulumi.getter(name="policyInconsistent")
    def policy_inconsistent(self) -> Optional[pulumi.Input[bool]]:
        """Whether the backup policy tied to this backup item is inconsistent."""
        return pulumi.get(self, "policy_inconsistent")

    @policy_inconsistent.setter
    def policy_inconsistent(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "policy_inconsistent", value)

    @property
    @pulumi.getter(name="recoveryPointCount")
    def recovery_point_count(self) -> Optional[pulumi.Input[int]]:
        """How many backup copies exist for this backup item."""
        return pulumi.get(self, "recovery_point_count")

    @recovery_point_count.setter
    def recovery_point_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "recovery_point_count", value)
@pulumi.input_type
class AzureSqlProtectedItemArgs:
    def __init__(__self__, *,
                 protected_item_type: pulumi.Input[str],
                 backup_management_type: Optional[pulumi.Input[Union[str, 'BackupManagementType']]] = None,
                 backup_set_name: Optional[pulumi.Input[str]] = None,
                 container_name: Optional[pulumi.Input[str]] = None,
                 create_mode: Optional[pulumi.Input[Union[str, 'CreateMode']]] = None,
                 deferred_delete_time_in_utc: Optional[pulumi.Input[str]] = None,
                 deferred_delete_time_remaining: Optional[pulumi.Input[str]] = None,
                 extended_info: Optional[pulumi.Input['AzureSqlProtectedItemExtendedInfoArgs']] = None,
                 is_deferred_delete_schedule_upcoming: Optional[pulumi.Input[bool]] = None,
                 is_rehydrate: Optional[pulumi.Input[bool]] = None,
                 is_scheduled_for_deferred_delete: Optional[pulumi.Input[bool]] = None,
                 last_recovery_point: Optional[pulumi.Input[str]] = None,
                 policy_id: Optional[pulumi.Input[str]] = None,
                 protected_item_data_id: Optional[pulumi.Input[str]] = None,
                 protection_state: Optional[pulumi.Input[Union[str, 'ProtectedItemState']]] = None,
                 source_resource_id: Optional[pulumi.Input[str]] = None,
                 workload_type: Optional[pulumi.Input[Union[str, 'DataSourceType']]] = None):
        """
        Azure SQL workload-specific backup item.

        :param pulumi.Input[str] protected_item_type: backup item type.
               Expected value is 'Microsoft.Sql/servers/databases'.
        :param pulumi.Input[Union[str, 'BackupManagementType']] backup_management_type: Kind of backup management used for the backed up item.
        :param pulumi.Input[str] backup_set_name: Name of the backup set this backup item belongs to
        :param pulumi.Input[str] container_name: Unique name of the container
        :param pulumi.Input[Union[str, 'CreateMode']] create_mode: Create mode distinguishing recovery of an existing soft-deleted data source from creation of a new one.
        :param pulumi.Input[str] deferred_delete_time_in_utc: Deferred-deletion time, in UTC
        :param pulumi.Input[str] deferred_delete_time_remaining: Time left before a DS marked for deferred delete is permanently removed
        :param pulumi.Input['AzureSqlProtectedItemExtendedInfoArgs'] extended_info: Extra details for this backup item.
        :param pulumi.Input[bool] is_deferred_delete_schedule_upcoming: Whether the deferred-deleted DS will be purged soon
        :param pulumi.Input[bool] is_rehydrate: Whether the deferred-deleted DS should move into the Pause state
        :param pulumi.Input[bool] is_scheduled_for_deferred_delete: Whether the DS is scheduled for deferred delete
        :param pulumi.Input[str] last_recovery_point: Creation time of the newest (latest) backup copy for this backup item.
        :param pulumi.Input[str] policy_id: Identifier of the backup policy used to back up this item.
        :param pulumi.Input[str] protected_item_data_id: Internal backup-item ID used by the Azure SQL Backup engine to contact Recovery Services.
        :param pulumi.Input[Union[str, 'ProtectedItemState']] protection_state: Current backup state of the backed up item.
        :param pulumi.Input[str] source_resource_id: ARM identifier of the resource being backed up.
        :param pulumi.Input[Union[str, 'DataSourceType']] workload_type: Kind of workload this item represents.
        """
        # The discriminator is fixed for this input type regardless of the caller-supplied value.
        pulumi.set(__self__, "protected_item_type", 'Microsoft.Sql/servers/databases')
        # Persist only the optional arguments the caller actually supplied.
        for key, arg in (
                ("backup_management_type", backup_management_type),
                ("backup_set_name", backup_set_name),
                ("container_name", container_name),
                ("create_mode", create_mode),
                ("deferred_delete_time_in_utc", deferred_delete_time_in_utc),
                ("deferred_delete_time_remaining", deferred_delete_time_remaining),
                ("extended_info", extended_info),
                ("is_deferred_delete_schedule_upcoming", is_deferred_delete_schedule_upcoming),
                ("is_rehydrate", is_rehydrate),
                ("is_scheduled_for_deferred_delete", is_scheduled_for_deferred_delete),
                ("last_recovery_point", last_recovery_point),
                ("policy_id", policy_id),
                ("protected_item_data_id", protected_item_data_id),
                ("protection_state", protection_state),
                ("source_resource_id", source_resource_id),
                ("workload_type", workload_type)):
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter(name="protectedItemType")
    def protected_item_type(self) -> pulumi.Input[str]:
        """
        backup item type.
        Expected value is 'Microsoft.Sql/servers/databases'.
        """
        return pulumi.get(self, "protected_item_type")

    @protected_item_type.setter
    def protected_item_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "protected_item_type", value)

    @property
    @pulumi.getter(name="backupManagementType")
    def backup_management_type(self) -> Optional[pulumi.Input[Union[str, 'BackupManagementType']]]:
        """Kind of backup management used for the backed up item."""
        return pulumi.get(self, "backup_management_type")

    @backup_management_type.setter
    def backup_management_type(self, value: Optional[pulumi.Input[Union[str, 'BackupManagementType']]]):
        pulumi.set(self, "backup_management_type", value)

    @property
    @pulumi.getter(name="backupSetName")
    def backup_set_name(self) -> Optional[pulumi.Input[str]]:
        """Name of the backup set this backup item belongs to"""
        return pulumi.get(self, "backup_set_name")

    @backup_set_name.setter
    def backup_set_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "backup_set_name", value)

    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> Optional[pulumi.Input[str]]:
        """Unique name of the container"""
        return pulumi.get(self, "container_name")

    @container_name.setter
    def container_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "container_name", value)

    @property
    @pulumi.getter(name="createMode")
    def create_mode(self) -> Optional[pulumi.Input[Union[str, 'CreateMode']]]:
        """Create mode distinguishing recovery of an existing soft-deleted data source from creation of a new one."""
        return pulumi.get(self, "create_mode")

    @create_mode.setter
    def create_mode(self, value: Optional[pulumi.Input[Union[str, 'CreateMode']]]):
        pulumi.set(self, "create_mode", value)

    @property
    @pulumi.getter(name="deferredDeleteTimeInUTC")
    def deferred_delete_time_in_utc(self) -> Optional[pulumi.Input[str]]:
        """Deferred-deletion time, in UTC"""
        return pulumi.get(self, "deferred_delete_time_in_utc")

    @deferred_delete_time_in_utc.setter
    def deferred_delete_time_in_utc(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "deferred_delete_time_in_utc", value)

    @property
    @pulumi.getter(name="deferredDeleteTimeRemaining")
    def deferred_delete_time_remaining(self) -> Optional[pulumi.Input[str]]:
        """Time left before a DS marked for deferred delete is permanently removed"""
        return pulumi.get(self, "deferred_delete_time_remaining")

    @deferred_delete_time_remaining.setter
    def deferred_delete_time_remaining(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "deferred_delete_time_remaining", value)

    @property
    @pulumi.getter(name="extendedInfo")
    def extended_info(self) -> Optional[pulumi.Input['AzureSqlProtectedItemExtendedInfoArgs']]:
        """Extra details for this backup item."""
        return pulumi.get(self, "extended_info")

    @extended_info.setter
    def extended_info(self, value: Optional[pulumi.Input['AzureSqlProtectedItemExtendedInfoArgs']]):
        pulumi.set(self, "extended_info", value)

    @property
    @pulumi.getter(name="isDeferredDeleteScheduleUpcoming")
    def is_deferred_delete_schedule_upcoming(self) -> Optional[pulumi.Input[bool]]:
        """Whether the deferred-deleted DS will be purged soon"""
        return pulumi.get(self, "is_deferred_delete_schedule_upcoming")

    @is_deferred_delete_schedule_upcoming.setter
    def is_deferred_delete_schedule_upcoming(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_deferred_delete_schedule_upcoming", value)

    @property
    @pulumi.getter(name="isRehydrate")
    def is_rehydrate(self) -> Optional[pulumi.Input[bool]]:
        """Whether the deferred-deleted DS should move into the Pause state"""
        return pulumi.get(self, "is_rehydrate")

    @is_rehydrate.setter
    def is_rehydrate(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_rehydrate", value)

    @property
    @pulumi.getter(name="isScheduledForDeferredDelete")
    def is_scheduled_for_deferred_delete(self) -> Optional[pulumi.Input[bool]]:
        """Whether the DS is scheduled for deferred delete"""
        return pulumi.get(self, "is_scheduled_for_deferred_delete")

    @is_scheduled_for_deferred_delete.setter
    def is_scheduled_for_deferred_delete(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_scheduled_for_deferred_delete", value)

    @property
    @pulumi.getter(name="lastRecoveryPoint")
    def last_recovery_point(self) -> Optional[pulumi.Input[str]]:
        """Creation time of the newest (latest) backup copy for this backup item."""
        return pulumi.get(self, "last_recovery_point")

    @last_recovery_point.setter
    def last_recovery_point(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_recovery_point", value)

    @property
    @pulumi.getter(name="policyId")
    def policy_id(self) -> Optional[pulumi.Input[str]]:
        """Identifier of the backup policy used to back up this item."""
        return pulumi.get(self, "policy_id")

    @policy_id.setter
    def policy_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "policy_id", value)

    @property
    @pulumi.getter(name="protectedItemDataId")
    def protected_item_data_id(self) -> Optional[pulumi.Input[str]]:
        """Internal backup-item ID used by the Azure SQL Backup engine to contact Recovery Services."""
        return pulumi.get(self, "protected_item_data_id")

    @protected_item_data_id.setter
    def protected_item_data_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protected_item_data_id", value)

    @property
    @pulumi.getter(name="protectionState")
    def protection_state(self) -> Optional[pulumi.Input[Union[str, 'ProtectedItemState']]]:
        """Current backup state of the backed up item."""
        return pulumi.get(self, "protection_state")

    @protection_state.setter
    def protection_state(self, value: Optional[pulumi.Input[Union[str, 'ProtectedItemState']]]):
        pulumi.set(self, "protection_state", value)

    @property
    @pulumi.getter(name="sourceResourceId")
    def source_resource_id(self) -> Optional[pulumi.Input[str]]:
        """ARM identifier of the resource being backed up."""
        return pulumi.get(self, "source_resource_id")

    @source_resource_id.setter
    def source_resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "source_resource_id", value)

    @property
    @pulumi.getter(name="workloadType")
    def workload_type(self) -> Optional[pulumi.Input[Union[str, 'DataSourceType']]]:
        """Kind of workload this item represents."""
        return pulumi.get(self, "workload_type")

    @workload_type.setter
    def workload_type(self, value: Optional[pulumi.Input[Union[str, 'DataSourceType']]]):
        pulumi.set(self, "workload_type", value)
@pulumi.input_type
class AzureSqlProtectedItemExtendedInfoArgs:
    def __init__(__self__, *,
                 oldest_recovery_point: Optional[pulumi.Input[str]] = None,
                 policy_state: Optional[pulumi.Input[str]] = None,
                 recovery_point_count: Optional[pulumi.Input[int]] = None):
        """
        Extra details attached to an Azure SQL specific protected item.

        :param pulumi.Input[str] oldest_recovery_point: The oldest backup copy available for this item in the service.
        :param pulumi.Input[str] policy_state: State of the backup policy tied to this backup item.
        :param pulumi.Input[int] recovery_point_count: How many backup copies are associated with this backup item.
        """
        # Persist only the arguments the caller actually supplied.
        for key, arg in (
                ("oldest_recovery_point", oldest_recovery_point),
                ("policy_state", policy_state),
                ("recovery_point_count", recovery_point_count)):
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter(name="oldestRecoveryPoint")
    def oldest_recovery_point(self) -> Optional[pulumi.Input[str]]:
        """The oldest backup copy available for this item in the service."""
        return pulumi.get(self, "oldest_recovery_point")

    @oldest_recovery_point.setter
    def oldest_recovery_point(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "oldest_recovery_point", value)

    @property
    @pulumi.getter(name="policyState")
    def policy_state(self) -> Optional[pulumi.Input[str]]:
        """State of the backup policy tied to this backup item."""
        return pulumi.get(self, "policy_state")

    @policy_state.setter
    def policy_state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "policy_state", value)

    @property
    @pulumi.getter(name="recoveryPointCount")
    def recovery_point_count(self) -> Optional[pulumi.Input[int]]:
        """How many backup copies are associated with this backup item."""
        return pulumi.get(self, "recovery_point_count")

    @recovery_point_count.setter
    def recovery_point_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "recovery_point_count", value)
@pulumi.input_type
class AzureVmWorkloadProtectedItemArgs:
def __init__(__self__, *,
protected_item_type: pulumi.Input[str],
backup_management_type: Optional[pulumi.Input[Union[str, 'BackupManagementType']]] = None,
backup_set_name: Optional[pulumi.Input[str]] = None,
container_name: Optional[pulumi.Input[str]] = None,
create_mode: Optional[pulumi.Input[Union[str, 'CreateMode']]] = None,
deferred_delete_time_in_utc: Optional[pulumi.Input[str]] = None,
deferred_delete_time_remaining: Optional[pulumi.Input[str]] = None,
extended_info: Optional[pulumi.Input['AzureVmWorkloadProtectedItemExtendedInfoArgs']] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
is_deferred_delete_schedule_upcoming: Optional[pulumi.Input[bool]] = None,
is_rehydrate: Optional[pulumi.Input[bool]] = None,
is_scheduled_for_deferred_delete: Optional[pulumi.Input[bool]] = None,
kpis_healths: Optional[pulumi.Input[Mapping[str, pulumi.Input['KPIResourceHealthDetailsArgs']]]] = None,
last_backup_status: Optional[pulumi.Input[Union[str, 'LastBackupStatus']]] = None,
last_backup_time: Optional[pulumi.Input[str]] = None,
last_recovery_point: Optional[pulumi.Input[str]] = None,
parent_name: Optional[pulumi.Input[str]] = None,
parent_type: Optional[pulumi.Input[str]] = None,
policy_id: Optional[pulumi.Input[str]] = None,
protected_item_data_source_id: Optional[pulumi.Input[str]] = None,
protected_item_health_status: Optional[pulumi.Input[Union[str, 'ProtectedItemHealthStatus']]] = None,
protection_state: Optional[pulumi.Input[Union[str, 'ProtectionState']]] = None,
protection_status: Optional[pulumi.Input[str]] = None,
server_name: Optional[pulumi.Input[str]] = None,
source_resource_id: Optional[pulumi.Input[str]] = None,
workload_type: Optional[pulumi.Input[Union[str, 'DataSourceType']]] = None):
"""
Azure VM workload-specific protected item.
:param pulumi.Input[str] protected_item_type: backup item type.
Expected value is 'AzureVmWorkloadProtectedItem'.
:param pulumi.Input[Union[str, 'BackupManagementType']] backup_management_type: Type of backup management for the backed up item.
:param pulumi.Input[str] backup_set_name: Name of the backup set the backup item belongs to
:param pulumi.Input[str] container_name: Unique name of container
:param pulumi.Input[Union[str, 'CreateMode']] create_mode: Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
:param pulumi.Input[str] deferred_delete_time_in_utc: Time for deferred deletion in UTC
:param pulumi.Input[str] deferred_delete_time_remaining: Time remaining before the DS marked for deferred delete is permanently deleted
:param pulumi.Input['AzureVmWorkloadProtectedItemExtendedInfoArgs'] extended_info: Additional information for this backup item.
:param pulumi.Input[str] friendly_name: Friendly name of the DB represented by this backup item.
:param pulumi.Input[bool] is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS is to be purged soon
:param pulumi.Input[bool] is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause state
:param pulumi.Input[bool] is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for deferred delete
:param pulumi.Input[Mapping[str, pulumi.Input['KPIResourceHealthDetailsArgs']]] kpis_healths: Health details of different KPIs
:param pulumi.Input[Union[str, 'LastBackupStatus']] last_backup_status: Last backup operation status. Possible values: Healthy, Unhealthy.
:param pulumi.Input[str] last_backup_time: Timestamp of the last backup operation on this backup item.
:param pulumi.Input[str] last_recovery_point: Timestamp when the last (latest) backup copy was created for this backup item.
:param pulumi.Input[str] parent_name: Parent name of the DB such as Instance or Availability Group.
:param pulumi.Input[str] parent_type: Parent type of protected item, example: for a DB, standalone server or distributed
:param pulumi.Input[str] policy_id: ID of the backup policy with which this item is backed up.
:param pulumi.Input[str] protected_item_data_source_id: Data ID of the protected item.
:param pulumi.Input[Union[str, 'ProtectedItemHealthStatus']] protected_item_health_status: Health status of the backup item, evaluated based on last heartbeat received
:param pulumi.Input[Union[str, 'ProtectionState']] protection_state: Backup state of this backup item.
:param pulumi.Input[str] protection_status: Backup status of this backup item.
:param pulumi.Input[str] server_name: Host/Cluster Name for instance or AG
:param pulumi.Input[str] source_resource_id: ARM ID of the resource to be backed up.
:param pulumi.Input[Union[str, 'DataSourceType']] workload_type: Type of workload this item represents.
"""
pulumi.set(__self__, "protected_item_type", 'AzureVmWorkloadProtectedItem')
if backup_management_type is not None:
pulumi.set(__self__, "backup_management_type", backup_management_type)
if backup_set_name is not None:
pulumi.set(__self__, "backup_set_name", backup_set_name)
if container_name is not None:
pulumi.set(__self__, "container_name", container_name)
if create_mode is not None:
pulumi.set(__self__, "create_mode", create_mode)
if deferred_delete_time_in_utc is not None:
pulumi.set(__self__, "deferred_delete_time_in_utc", deferred_delete_time_in_utc)
if deferred_delete_time_remaining is not None:
pulumi.set(__self__, "deferred_delete_time_remaining", deferred_delete_time_remaining)
if extended_info is not None:
pulumi.set(__self__, "extended_info", extended_info)
if friendly_name is not None:
pulumi.set(__self__, "friendly_name", friendly_name)
if is_deferred_delete_schedule_upcoming is not None:
pulumi.set(__self__, "is_deferred_delete_schedule_upcoming", is_deferred_delete_schedule_upcoming)
if is_rehydrate is not None:
pulumi.set(__self__, "is_rehydrate", is_rehydrate)
if is_scheduled_for_deferred_delete is not None:
pulumi.set(__self__, "is_scheduled_for_deferred_delete", is_scheduled_for_deferred_delete)
if kpis_healths is not None:
pulumi.set(__self__, "kpis_healths", kpis_healths)
if last_backup_status is not None:
pulumi.set(__self__, "last_backup_status", last_backup_status)
if last_backup_time is not None:
pulumi.set(__self__, "last_backup_time", last_backup_time)
if last_recovery_point is not None:
pulumi.set(__self__, "last_recovery_point", last_recovery_point)
if parent_name is not None:
pulumi.set(__self__, "parent_name", parent_name)
if parent_type is not None:
pulumi.set(__self__, "parent_type", parent_type)
if policy_id is not None:
pulumi.set(__self__, "policy_id", policy_id)
if protected_item_data_source_id is not None:
pulumi.set(__self__, "protected_item_data_source_id", protected_item_data_source_id)
if protected_item_health_status is not None:
pulumi.set(__self__, "protected_item_health_status", protected_item_health_status)
if protection_state is not None:
pulumi.set(__self__, "protection_state", protection_state)
if protection_status is not None:
pulumi.set(__self__, "protection_status", protection_status)
if server_name is not None:
pulumi.set(__self__, "server_name", server_name)
if source_resource_id is not None:
pulumi.set(__self__, "source_resource_id", source_resource_id)
if workload_type is not None:
pulumi.set(__self__, "workload_type", workload_type)
@property
@pulumi.getter(name="protectedItemType")
def protected_item_type(self) -> pulumi.Input[str]:
"""
backup item type.
Expected value is 'AzureVmWorkloadProtectedItem'.
"""
return pulumi.get(self, "protected_item_type")
@protected_item_type.setter
def protected_item_type(self, value: pulumi.Input[str]):
pulumi.set(self, "protected_item_type", value)
@property
@pulumi.getter(name="backupManagementType")
def backup_management_type(self) -> Optional[pulumi.Input[Union[str, 'BackupManagementType']]]:
"""
Type of backup management for the backed up item.
"""
return pulumi.get(self, "backup_management_type")
@backup_management_type.setter
def backup_management_type(self, value: Optional[pulumi.Input[Union[str, 'BackupManagementType']]]):
pulumi.set(self, "backup_management_type", value)
@property
@pulumi.getter(name="backupSetName")
def backup_set_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the backup set the backup item belongs to
"""
return pulumi.get(self, "backup_set_name")
@backup_set_name.setter
def backup_set_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "backup_set_name", value)
@property
@pulumi.getter(name="containerName")
def container_name(self) -> Optional[pulumi.Input[str]]:
"""
Unique name of container
"""
return pulumi.get(self, "container_name")
@container_name.setter
def container_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "container_name", value)
@property
@pulumi.getter(name="createMode")
def create_mode(self) -> Optional[pulumi.Input[Union[str, 'CreateMode']]]:
"""
Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
"""
return pulumi.get(self, "create_mode")
@create_mode.setter
def create_mode(self, value: Optional[pulumi.Input[Union[str, 'CreateMode']]]):
pulumi.set(self, "create_mode", value)
@property
@pulumi.getter(name="deferredDeleteTimeInUTC")
def deferred_delete_time_in_utc(self) -> Optional[pulumi.Input[str]]:
"""
Time for deferred deletion in UTC
"""
return pulumi.get(self, "deferred_delete_time_in_utc")
@deferred_delete_time_in_utc.setter
def deferred_delete_time_in_utc(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "deferred_delete_time_in_utc", value)
@property
@pulumi.getter(name="deferredDeleteTimeRemaining")
def deferred_delete_time_remaining(self) -> Optional[pulumi.Input[str]]:
"""
Time remaining before the DS marked for deferred delete is permanently deleted
"""
return pulumi.get(self, "deferred_delete_time_remaining")
@deferred_delete_time_remaining.setter
def deferred_delete_time_remaining(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "deferred_delete_time_remaining", value)
@property
@pulumi.getter(name="extendedInfo")
def extended_info(self) -> Optional[pulumi.Input['AzureVmWorkloadProtectedItemExtendedInfoArgs']]:
"""
Additional information for this backup item.
"""
return pulumi.get(self, "extended_info")
@extended_info.setter
def extended_info(self, value: Optional[pulumi.Input['AzureVmWorkloadProtectedItemExtendedInfoArgs']]):
pulumi.set(self, "extended_info", value)
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> Optional[pulumi.Input[str]]:
"""
Friendly name of the DB represented by this backup item.
"""
return pulumi.get(self, "friendly_name")
@friendly_name.setter
def friendly_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "friendly_name", value)
@property
@pulumi.getter(name="isDeferredDeleteScheduleUpcoming")
def is_deferred_delete_schedule_upcoming(self) -> Optional[pulumi.Input[bool]]:
"""
Flag to identify whether the deferred deleted DS is to be purged soon
"""
return pulumi.get(self, "is_deferred_delete_schedule_upcoming")
@is_deferred_delete_schedule_upcoming.setter
def is_deferred_delete_schedule_upcoming(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_deferred_delete_schedule_upcoming", value)
@property
@pulumi.getter(name="isRehydrate")
def is_rehydrate(self) -> Optional[pulumi.Input[bool]]:
"""
Flag to identify that deferred deleted DS is to be moved into Pause state
"""
return pulumi.get(self, "is_rehydrate")
@is_rehydrate.setter
def is_rehydrate(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_rehydrate", value)
@property
@pulumi.getter(name="isScheduledForDeferredDelete")
def is_scheduled_for_deferred_delete(self) -> Optional[pulumi.Input[bool]]:
"""
Flag to identify whether the DS is scheduled for deferred delete
"""
return pulumi.get(self, "is_scheduled_for_deferred_delete")
@is_scheduled_for_deferred_delete.setter
def is_scheduled_for_deferred_delete(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_scheduled_for_deferred_delete", value)
@property
@pulumi.getter(name="kpisHealths")
def kpis_healths(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['KPIResourceHealthDetailsArgs']]]]:
"""
Health details of different KPIs
"""
return pulumi.get(self, "kpis_healths")
@kpis_healths.setter
def kpis_healths(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['KPIResourceHealthDetailsArgs']]]]):
pulumi.set(self, "kpis_healths", value)
@property
@pulumi.getter(name="lastBackupStatus")
def last_backup_status(self) -> Optional[pulumi.Input[Union[str, 'LastBackupStatus']]]:
"""
Last backup operation status. Possible values: Healthy, Unhealthy.
"""
return pulumi.get(self, "last_backup_status")
@last_backup_status.setter
def last_backup_status(self, value: Optional[pulumi.Input[Union[str, 'LastBackupStatus']]]):
pulumi.set(self, "last_backup_status", value)
@property
@pulumi.getter(name="lastBackupTime")
def last_backup_time(self) -> Optional[pulumi.Input[str]]:
"""
Timestamp of the last backup operation on this backup item.
"""
return pulumi.get(self, "last_backup_time")
@last_backup_time.setter
def last_backup_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "last_backup_time", value)
@property
@pulumi.getter(name="lastRecoveryPoint")
def last_recovery_point(self) -> Optional[pulumi.Input[str]]:
"""
Timestamp when the last (latest) backup copy was created for this backup item.
"""
return pulumi.get(self, "last_recovery_point")
@last_recovery_point.setter
def last_recovery_point(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "last_recovery_point", value)
@property
@pulumi.getter(name="parentName")
def parent_name(self) -> Optional[pulumi.Input[str]]:
"""
Parent name of the DB such as Instance or Availability Group.
"""
return pulumi.get(self, "parent_name")
@parent_name.setter
def parent_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "parent_name", value)
@property
@pulumi.getter(name="parentType")
def parent_type(self) -> Optional[pulumi.Input[str]]:
"""
Parent type of protected item, example: for a DB, standalone server or distributed
"""
return pulumi.get(self, "parent_type")
@parent_type.setter
def parent_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "parent_type", value)
@property
@pulumi.getter(name="policyId")
def policy_id(self) -> Optional[pulumi.Input[str]]:
"""
ID of the backup policy with which this item is backed up.
"""
return pulumi.get(self, "policy_id")
@policy_id.setter
def policy_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "policy_id", value)
@property
@pulumi.getter(name="protectedItemDataSourceId")
def protected_item_data_source_id(self) -> Optional[pulumi.Input[str]]:
"""
Data ID of the protected item.
"""
return pulumi.get(self, "protected_item_data_source_id")
@protected_item_data_source_id.setter
def protected_item_data_source_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "protected_item_data_source_id", value)
@property
@pulumi.getter(name="protectedItemHealthStatus")
def protected_item_health_status(self) -> Optional[pulumi.Input[Union[str, 'ProtectedItemHealthStatus']]]:
"""
Health status of the backup item, evaluated based on last heartbeat received
"""
return pulumi.get(self, "protected_item_health_status")
@protected_item_health_status.setter
def protected_item_health_status(self, value: Optional[pulumi.Input[Union[str, 'ProtectedItemHealthStatus']]]):
pulumi.set(self, "protected_item_health_status", value)
@property
@pulumi.getter(name="protectionState")
def protection_state(self) -> Optional[pulumi.Input[Union[str, 'ProtectionState']]]:
    """Backup state of this backup item."""
    return pulumi.get(self, "protection_state")

@protection_state.setter
def protection_state(self, value: Optional[pulumi.Input[Union[str, 'ProtectionState']]]):
    # Delegate storage to the pulumi property bag.
    pulumi.set(self, "protection_state", value)
@property
@pulumi.getter(name="protectionStatus")
def protection_status(self) -> Optional[pulumi.Input[str]]:
    """Backup status of this backup item."""
    return pulumi.get(self, "protection_status")

@protection_status.setter
def protection_status(self, value: Optional[pulumi.Input[str]]):
    # Delegate storage to the pulumi property bag.
    pulumi.set(self, "protection_status", value)
@property
@pulumi.getter(name="serverName")
def server_name(self) -> Optional[pulumi.Input[str]]:
    """Host/Cluster Name for instance or AG."""
    return pulumi.get(self, "server_name")

@server_name.setter
def server_name(self, value: Optional[pulumi.Input[str]]):
    # Delegate storage to the pulumi property bag.
    pulumi.set(self, "server_name", value)
@property
@pulumi.getter(name="sourceResourceId")
def source_resource_id(self) -> Optional[pulumi.Input[str]]:
    """ARM ID of the resource to be backed up."""
    return pulumi.get(self, "source_resource_id")

@source_resource_id.setter
def source_resource_id(self, value: Optional[pulumi.Input[str]]):
    # Delegate storage to the pulumi property bag.
    pulumi.set(self, "source_resource_id", value)
@property
@pulumi.getter(name="workloadType")
def workload_type(self) -> Optional[pulumi.Input[Union[str, 'DataSourceType']]]:
    """Type of workload this item represents."""
    return pulumi.get(self, "workload_type")

@workload_type.setter
def workload_type(self, value: Optional[pulumi.Input[Union[str, 'DataSourceType']]]):
    # Delegate storage to the pulumi property bag.
    pulumi.set(self, "workload_type", value)
@pulumi.input_type
class AzureVmWorkloadProtectedItemExtendedInfoArgs:
    def __init__(__self__, *,
                 oldest_recovery_point: Optional[pulumi.Input[str]] = None,
                 policy_state: Optional[pulumi.Input[str]] = None,
                 recovery_point_count: Optional[pulumi.Input[int]] = None):
        """
        Additional information on Azure Workload for SQL specific backup item.

        :param pulumi.Input[str] oldest_recovery_point: The oldest backup copy available for this backup item.
        :param pulumi.Input[str] policy_state: Indicates consistency of policy object and policy applied to this backup item.
        :param pulumi.Input[int] recovery_point_count: Number of backup copies available for this backup item.
        """
        # Record only the arguments that were actually supplied.
        _provided = (
            ("oldest_recovery_point", oldest_recovery_point),
            ("policy_state", policy_state),
            ("recovery_point_count", recovery_point_count),
        )
        for _key, _val in _provided:
            if _val is not None:
                pulumi.set(__self__, _key, _val)

    @property
    @pulumi.getter(name="oldestRecoveryPoint")
    def oldest_recovery_point(self) -> Optional[pulumi.Input[str]]:
        """The oldest backup copy available for this backup item."""
        return pulumi.get(self, "oldest_recovery_point")

    @oldest_recovery_point.setter
    def oldest_recovery_point(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "oldest_recovery_point", value)

    @property
    @pulumi.getter(name="policyState")
    def policy_state(self) -> Optional[pulumi.Input[str]]:
        """Indicates consistency of policy object and policy applied to this backup item."""
        return pulumi.get(self, "policy_state")

    @policy_state.setter
    def policy_state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "policy_state", value)

    @property
    @pulumi.getter(name="recoveryPointCount")
    def recovery_point_count(self) -> Optional[pulumi.Input[int]]:
        """Number of backup copies available for this backup item."""
        return pulumi.get(self, "recovery_point_count")

    @recovery_point_count.setter
    def recovery_point_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "recovery_point_count", value)
@pulumi.input_type
class AzureVmWorkloadSAPAseDatabaseProtectedItemArgs:
    def __init__(__self__, *,
                 protected_item_type: pulumi.Input[str],
                 backup_management_type: Optional[pulumi.Input[Union[str, 'BackupManagementType']]] = None,
                 backup_set_name: Optional[pulumi.Input[str]] = None,
                 container_name: Optional[pulumi.Input[str]] = None,
                 create_mode: Optional[pulumi.Input[Union[str, 'CreateMode']]] = None,
                 deferred_delete_time_in_utc: Optional[pulumi.Input[str]] = None,
                 deferred_delete_time_remaining: Optional[pulumi.Input[str]] = None,
                 extended_info: Optional[pulumi.Input['AzureVmWorkloadProtectedItemExtendedInfoArgs']] = None,
                 friendly_name: Optional[pulumi.Input[str]] = None,
                 is_deferred_delete_schedule_upcoming: Optional[pulumi.Input[bool]] = None,
                 is_rehydrate: Optional[pulumi.Input[bool]] = None,
                 is_scheduled_for_deferred_delete: Optional[pulumi.Input[bool]] = None,
                 kpis_healths: Optional[pulumi.Input[Mapping[str, pulumi.Input['KPIResourceHealthDetailsArgs']]]] = None,
                 last_backup_status: Optional[pulumi.Input[Union[str, 'LastBackupStatus']]] = None,
                 last_backup_time: Optional[pulumi.Input[str]] = None,
                 last_recovery_point: Optional[pulumi.Input[str]] = None,
                 parent_name: Optional[pulumi.Input[str]] = None,
                 parent_type: Optional[pulumi.Input[str]] = None,
                 policy_id: Optional[pulumi.Input[str]] = None,
                 protected_item_data_source_id: Optional[pulumi.Input[str]] = None,
                 protected_item_health_status: Optional[pulumi.Input[Union[str, 'ProtectedItemHealthStatus']]] = None,
                 protection_state: Optional[pulumi.Input[Union[str, 'ProtectionState']]] = None,
                 protection_status: Optional[pulumi.Input[str]] = None,
                 server_name: Optional[pulumi.Input[str]] = None,
                 source_resource_id: Optional[pulumi.Input[str]] = None,
                 workload_type: Optional[pulumi.Input[Union[str, 'DataSourceType']]] = None):
        """
        Azure VM workload-specific protected item representing SAP ASE Database.

        :param pulumi.Input[str] protected_item_type: backup item type.
               Expected value is 'AzureVmWorkloadSAPAseDatabase'.
        :param pulumi.Input[Union[str, 'BackupManagementType']] backup_management_type: Type of backup management for the backed up item.
        :param pulumi.Input[str] backup_set_name: Name of the backup set the backup item belongs to
        :param pulumi.Input[str] container_name: Unique name of container
        :param pulumi.Input[Union[str, 'CreateMode']] create_mode: Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
        :param pulumi.Input[str] deferred_delete_time_in_utc: Time for deferred deletion in UTC
        :param pulumi.Input[str] deferred_delete_time_remaining: Time remaining before the DS marked for deferred delete is permanently deleted
        :param pulumi.Input['AzureVmWorkloadProtectedItemExtendedInfoArgs'] extended_info: Additional information for this backup item.
        :param pulumi.Input[bool] is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS is to be purged soon
        :param pulumi.Input[str] friendly_name: Friendly name of the DB represented by this backup item.
        :param pulumi.Input[bool] is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause state
        :param pulumi.Input[bool] is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for deferred delete
        :param pulumi.Input[Mapping[str, pulumi.Input['KPIResourceHealthDetailsArgs']]] kpis_healths: Health details of different KPIs
        :param pulumi.Input[Union[str, 'LastBackupStatus']] last_backup_status: Last backup operation status. Possible values: Healthy, Unhealthy.
        :param pulumi.Input[str] last_backup_time: Timestamp of the last backup operation on this backup item.
        :param pulumi.Input[str] last_recovery_point: Timestamp when the last (latest) backup copy was created for this backup item.
        :param pulumi.Input[str] parent_name: Parent name of the DB such as Instance or Availability Group.
        :param pulumi.Input[str] parent_type: Parent type of protected item, example: for a DB, standalone server or distributed
        :param pulumi.Input[str] policy_id: ID of the backup policy with which this item is backed up.
        :param pulumi.Input[str] protected_item_data_source_id: Data ID of the protected item.
        :param pulumi.Input[Union[str, 'ProtectedItemHealthStatus']] protected_item_health_status: Health status of the backup item, evaluated based on last heartbeat received
        :param pulumi.Input[Union[str, 'ProtectionState']] protection_state: Backup state of this backup item.
        :param pulumi.Input[str] protection_status: Backup status of this backup item.
        :param pulumi.Input[str] server_name: Host/Cluster Name for instance or AG
        :param pulumi.Input[str] source_resource_id: ARM ID of the resource to be backed up.
        :param pulumi.Input[Union[str, 'DataSourceType']] workload_type: Type of workload this item represents.
        """
        # The discriminator is always forced to the SAP ASE value, regardless of input.
        pulumi.set(__self__, "protected_item_type", 'AzureVmWorkloadSAPAseDatabase')
        # Record only the optional arguments that were actually supplied.
        _provided = (
            ("backup_management_type", backup_management_type),
            ("backup_set_name", backup_set_name),
            ("container_name", container_name),
            ("create_mode", create_mode),
            ("deferred_delete_time_in_utc", deferred_delete_time_in_utc),
            ("deferred_delete_time_remaining", deferred_delete_time_remaining),
            ("extended_info", extended_info),
            ("friendly_name", friendly_name),
            ("is_deferred_delete_schedule_upcoming", is_deferred_delete_schedule_upcoming),
            ("is_rehydrate", is_rehydrate),
            ("is_scheduled_for_deferred_delete", is_scheduled_for_deferred_delete),
            ("kpis_healths", kpis_healths),
            ("last_backup_status", last_backup_status),
            ("last_backup_time", last_backup_time),
            ("last_recovery_point", last_recovery_point),
            ("parent_name", parent_name),
            ("parent_type", parent_type),
            ("policy_id", policy_id),
            ("protected_item_data_source_id", protected_item_data_source_id),
            ("protected_item_health_status", protected_item_health_status),
            ("protection_state", protection_state),
            ("protection_status", protection_status),
            ("server_name", server_name),
            ("source_resource_id", source_resource_id),
            ("workload_type", workload_type),
        )
        for _key, _val in _provided:
            if _val is not None:
                pulumi.set(__self__, _key, _val)

    @property
    @pulumi.getter(name="protectedItemType")
    def protected_item_type(self) -> pulumi.Input[str]:
        """
        backup item type.
        Expected value is 'AzureVmWorkloadSAPAseDatabase'.
        """
        return pulumi.get(self, "protected_item_type")

    @protected_item_type.setter
    def protected_item_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "protected_item_type", value)

    @property
    @pulumi.getter(name="backupManagementType")
    def backup_management_type(self) -> Optional[pulumi.Input[Union[str, 'BackupManagementType']]]:
        """Type of backup management for the backed up item."""
        return pulumi.get(self, "backup_management_type")

    @backup_management_type.setter
    def backup_management_type(self, value: Optional[pulumi.Input[Union[str, 'BackupManagementType']]]):
        pulumi.set(self, "backup_management_type", value)

    @property
    @pulumi.getter(name="backupSetName")
    def backup_set_name(self) -> Optional[pulumi.Input[str]]:
        """Name of the backup set the backup item belongs to."""
        return pulumi.get(self, "backup_set_name")

    @backup_set_name.setter
    def backup_set_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "backup_set_name", value)

    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> Optional[pulumi.Input[str]]:
        """Unique name of container."""
        return pulumi.get(self, "container_name")

    @container_name.setter
    def container_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "container_name", value)

    @property
    @pulumi.getter(name="createMode")
    def create_mode(self) -> Optional[pulumi.Input[Union[str, 'CreateMode']]]:
        """Create mode to indicate recovery of existing soft deleted data source or creation of new data source."""
        return pulumi.get(self, "create_mode")

    @create_mode.setter
    def create_mode(self, value: Optional[pulumi.Input[Union[str, 'CreateMode']]]):
        pulumi.set(self, "create_mode", value)

    @property
    @pulumi.getter(name="deferredDeleteTimeInUTC")
    def deferred_delete_time_in_utc(self) -> Optional[pulumi.Input[str]]:
        """Time for deferred deletion in UTC."""
        return pulumi.get(self, "deferred_delete_time_in_utc")

    @deferred_delete_time_in_utc.setter
    def deferred_delete_time_in_utc(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "deferred_delete_time_in_utc", value)

    @property
    @pulumi.getter(name="deferredDeleteTimeRemaining")
    def deferred_delete_time_remaining(self) -> Optional[pulumi.Input[str]]:
        """Time remaining before the DS marked for deferred delete is permanently deleted."""
        return pulumi.get(self, "deferred_delete_time_remaining")

    @deferred_delete_time_remaining.setter
    def deferred_delete_time_remaining(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "deferred_delete_time_remaining", value)

    @property
    @pulumi.getter(name="extendedInfo")
    def extended_info(self) -> Optional[pulumi.Input['AzureVmWorkloadProtectedItemExtendedInfoArgs']]:
        """Additional information for this backup item."""
        return pulumi.get(self, "extended_info")

    @extended_info.setter
    def extended_info(self, value: Optional[pulumi.Input['AzureVmWorkloadProtectedItemExtendedInfoArgs']]):
        pulumi.set(self, "extended_info", value)

    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[pulumi.Input[str]]:
        """Friendly name of the DB represented by this backup item."""
        return pulumi.get(self, "friendly_name")

    @friendly_name.setter
    def friendly_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "friendly_name", value)

    @property
    @pulumi.getter(name="isDeferredDeleteScheduleUpcoming")
    def is_deferred_delete_schedule_upcoming(self) -> Optional[pulumi.Input[bool]]:
        """Flag to identify whether the deferred deleted DS is to be purged soon."""
        return pulumi.get(self, "is_deferred_delete_schedule_upcoming")

    @is_deferred_delete_schedule_upcoming.setter
    def is_deferred_delete_schedule_upcoming(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_deferred_delete_schedule_upcoming", value)

    @property
    @pulumi.getter(name="isRehydrate")
    def is_rehydrate(self) -> Optional[pulumi.Input[bool]]:
        """Flag to identify that deferred deleted DS is to be moved into Pause state."""
        return pulumi.get(self, "is_rehydrate")

    @is_rehydrate.setter
    def is_rehydrate(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_rehydrate", value)

    @property
    @pulumi.getter(name="isScheduledForDeferredDelete")
    def is_scheduled_for_deferred_delete(self) -> Optional[pulumi.Input[bool]]:
        """Flag to identify whether the DS is scheduled for deferred delete."""
        return pulumi.get(self, "is_scheduled_for_deferred_delete")

    @is_scheduled_for_deferred_delete.setter
    def is_scheduled_for_deferred_delete(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_scheduled_for_deferred_delete", value)

    @property
    @pulumi.getter(name="kpisHealths")
    def kpis_healths(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['KPIResourceHealthDetailsArgs']]]]:
        """Health details of different KPIs."""
        return pulumi.get(self, "kpis_healths")

    @kpis_healths.setter
    def kpis_healths(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['KPIResourceHealthDetailsArgs']]]]):
        pulumi.set(self, "kpis_healths", value)

    @property
    @pulumi.getter(name="lastBackupStatus")
    def last_backup_status(self) -> Optional[pulumi.Input[Union[str, 'LastBackupStatus']]]:
        """Last backup operation status. Possible values: Healthy, Unhealthy."""
        return pulumi.get(self, "last_backup_status")

    @last_backup_status.setter
    def last_backup_status(self, value: Optional[pulumi.Input[Union[str, 'LastBackupStatus']]]):
        pulumi.set(self, "last_backup_status", value)

    @property
    @pulumi.getter(name="lastBackupTime")
    def last_backup_time(self) -> Optional[pulumi.Input[str]]:
        """Timestamp of the last backup operation on this backup item."""
        return pulumi.get(self, "last_backup_time")

    @last_backup_time.setter
    def last_backup_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_backup_time", value)

    @property
    @pulumi.getter(name="lastRecoveryPoint")
    def last_recovery_point(self) -> Optional[pulumi.Input[str]]:
        """Timestamp when the last (latest) backup copy was created for this backup item."""
        return pulumi.get(self, "last_recovery_point")

    @last_recovery_point.setter
    def last_recovery_point(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_recovery_point", value)

    @property
    @pulumi.getter(name="parentName")
    def parent_name(self) -> Optional[pulumi.Input[str]]:
        """Parent name of the DB such as Instance or Availability Group."""
        return pulumi.get(self, "parent_name")

    @parent_name.setter
    def parent_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "parent_name", value)

    @property
    @pulumi.getter(name="parentType")
    def parent_type(self) -> Optional[pulumi.Input[str]]:
        """Parent type of protected item, example: for a DB, standalone server or distributed."""
        return pulumi.get(self, "parent_type")

    @parent_type.setter
    def parent_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "parent_type", value)

    @property
    @pulumi.getter(name="policyId")
    def policy_id(self) -> Optional[pulumi.Input[str]]:
        """ID of the backup policy with which this item is backed up."""
        return pulumi.get(self, "policy_id")

    @policy_id.setter
    def policy_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "policy_id", value)

    @property
    @pulumi.getter(name="protectedItemDataSourceId")
    def protected_item_data_source_id(self) -> Optional[pulumi.Input[str]]:
        """Data ID of the protected item."""
        return pulumi.get(self, "protected_item_data_source_id")

    @protected_item_data_source_id.setter
    def protected_item_data_source_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protected_item_data_source_id", value)

    @property
    @pulumi.getter(name="protectedItemHealthStatus")
    def protected_item_health_status(self) -> Optional[pulumi.Input[Union[str, 'ProtectedItemHealthStatus']]]:
        """Health status of the backup item, evaluated based on last heartbeat received."""
        return pulumi.get(self, "protected_item_health_status")

    @protected_item_health_status.setter
    def protected_item_health_status(self, value: Optional[pulumi.Input[Union[str, 'ProtectedItemHealthStatus']]]):
        pulumi.set(self, "protected_item_health_status", value)

    @property
    @pulumi.getter(name="protectionState")
    def protection_state(self) -> Optional[pulumi.Input[Union[str, 'ProtectionState']]]:
        """Backup state of this backup item."""
        return pulumi.get(self, "protection_state")

    @protection_state.setter
    def protection_state(self, value: Optional[pulumi.Input[Union[str, 'ProtectionState']]]):
        pulumi.set(self, "protection_state", value)

    @property
    @pulumi.getter(name="protectionStatus")
    def protection_status(self) -> Optional[pulumi.Input[str]]:
        """Backup status of this backup item."""
        return pulumi.get(self, "protection_status")

    @protection_status.setter
    def protection_status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protection_status", value)

    @property
    @pulumi.getter(name="serverName")
    def server_name(self) -> Optional[pulumi.Input[str]]:
        """Host/Cluster Name for instance or AG."""
        return pulumi.get(self, "server_name")

    @server_name.setter
    def server_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "server_name", value)

    @property
    @pulumi.getter(name="sourceResourceId")
    def source_resource_id(self) -> Optional[pulumi.Input[str]]:
        """ARM ID of the resource to be backed up."""
        return pulumi.get(self, "source_resource_id")

    @source_resource_id.setter
    def source_resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "source_resource_id", value)

    @property
    @pulumi.getter(name="workloadType")
    def workload_type(self) -> Optional[pulumi.Input[Union[str, 'DataSourceType']]]:
        """Type of workload this item represents."""
        return pulumi.get(self, "workload_type")

    @workload_type.setter
    def workload_type(self, value: Optional[pulumi.Input[Union[str, 'DataSourceType']]]):
        pulumi.set(self, "workload_type", value)
@pulumi.input_type
class AzureVmWorkloadSAPHanaDatabaseProtectedItemArgs:
def __init__(__self__, *,
protected_item_type: pulumi.Input[str],
backup_management_type: Optional[pulumi.Input[Union[str, 'BackupManagementType']]] = None,
backup_set_name: Optional[pulumi.Input[str]] = None,
container_name: Optional[pulumi.Input[str]] = None,
create_mode: Optional[pulumi.Input[Union[str, 'CreateMode']]] = None,
deferred_delete_time_in_utc: Optional[pulumi.Input[str]] = None,
deferred_delete_time_remaining: Optional[pulumi.Input[str]] = None,
extended_info: Optional[pulumi.Input['AzureVmWorkloadProtectedItemExtendedInfoArgs']] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
is_deferred_delete_schedule_upcoming: Optional[pulumi.Input[bool]] = None,
is_rehydrate: Optional[pulumi.Input[bool]] = None,
is_scheduled_for_deferred_delete: Optional[pulumi.Input[bool]] = None,
kpis_healths: Optional[pulumi.Input[Mapping[str, pulumi.Input['KPIResourceHealthDetailsArgs']]]] = None,
last_backup_status: Optional[pulumi.Input[Union[str, 'LastBackupStatus']]] = None,
last_backup_time: Optional[pulumi.Input[str]] = None,
last_recovery_point: Optional[pulumi.Input[str]] = None,
parent_name: Optional[pulumi.Input[str]] = None,
parent_type: Optional[pulumi.Input[str]] = None,
policy_id: Optional[pulumi.Input[str]] = None,
protected_item_data_source_id: Optional[pulumi.Input[str]] = None,
protected_item_health_status: Optional[pulumi.Input[Union[str, 'ProtectedItemHealthStatus']]] = None,
protection_state: Optional[pulumi.Input[Union[str, 'ProtectionState']]] = None,
protection_status: Optional[pulumi.Input[str]] = None,
server_name: Optional[pulumi.Input[str]] = None,
source_resource_id: Optional[pulumi.Input[str]] = None,
workload_type: Optional[pulumi.Input[Union[str, 'DataSourceType']]] = None):
"""
Azure VM workload-specific protected item representing SAP HANA Database.
:param pulumi.Input[str] protected_item_type: backup item type.
Expected value is 'AzureVmWorkloadSAPHanaDatabase'.
:param pulumi.Input[Union[str, 'BackupManagementType']] backup_management_type: Type of backup management for the backed up item.
:param pulumi.Input[str] backup_set_name: Name of the backup set the backup item belongs to
:param pulumi.Input[str] container_name: Unique name of container
:param pulumi.Input[Union[str, 'CreateMode']] create_mode: Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
:param pulumi.Input[str] deferred_delete_time_in_utc: Time for deferred deletion in UTC
:param pulumi.Input[str] deferred_delete_time_remaining: Time remaining before the DS marked for deferred delete is permanently deleted
:param pulumi.Input['AzureVmWorkloadProtectedItemExtendedInfoArgs'] extended_info: Additional information for this backup item.
:param pulumi.Input[str] friendly_name: Friendly name of the DB represented by this backup item.
:param pulumi.Input[bool] is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS is to be purged soon
:param pulumi.Input[bool] is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause state
:param pulumi.Input[bool] is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for deferred delete
:param pulumi.Input[Mapping[str, pulumi.Input['KPIResourceHealthDetailsArgs']]] kpis_healths: Health details of different KPIs
:param pulumi.Input[Union[str, 'LastBackupStatus']] last_backup_status: Last backup operation status. Possible values: Healthy, Unhealthy.
:param pulumi.Input[str] last_backup_time: Timestamp of the last backup operation on this backup item.
:param pulumi.Input[str] last_recovery_point: Timestamp when the last (latest) backup copy was created for this backup item.
:param pulumi.Input[str] parent_name: Parent name of the DB such as Instance or Availability Group.
:param pulumi.Input[str] parent_type: Parent type of protected item, example: for a DB, standalone server or distributed
:param pulumi.Input[str] policy_id: ID of the backup policy with which this item is backed up.
:param pulumi.Input[str] protected_item_data_source_id: Data ID of the protected item.
:param pulumi.Input[Union[str, 'ProtectedItemHealthStatus']] protected_item_health_status: Health status of the backup item, evaluated based on last heartbeat received
:param pulumi.Input[Union[str, 'ProtectionState']] protection_state: Backup state of this backup item.
:param pulumi.Input[str] protection_status: Backup status of this backup item.
:param pulumi.Input[str] server_name: Host/Cluster Name for instance or AG
:param pulumi.Input[str] source_resource_id: ARM ID of the resource to be backed up.
:param pulumi.Input[Union[str, 'DataSourceType']] workload_type: Type of workload this item represents.
"""
pulumi.set(__self__, "protected_item_type", 'AzureVmWorkloadSAPHanaDatabase')
if backup_management_type is not None:
pulumi.set(__self__, "backup_management_type", backup_management_type)
if backup_set_name is not None:
pulumi.set(__self__, "backup_set_name", backup_set_name)
if container_name is not None:
pulumi.set(__self__, "container_name", container_name)
if create_mode is not None:
pulumi.set(__self__, "create_mode", create_mode)
if deferred_delete_time_in_utc is not None:
pulumi.set(__self__, "deferred_delete_time_in_utc", deferred_delete_time_in_utc)
if deferred_delete_time_remaining is not None:
pulumi.set(__self__, "deferred_delete_time_remaining", deferred_delete_time_remaining)
if extended_info is not None:
pulumi.set(__self__, "extended_info", extended_info)
if friendly_name is not None:
pulumi.set(__self__, "friendly_name", friendly_name)
if is_deferred_delete_schedule_upcoming is not None:
pulumi.set(__self__, "is_deferred_delete_schedule_upcoming", is_deferred_delete_schedule_upcoming)
if is_rehydrate is not None:
pulumi.set(__self__, "is_rehydrate", is_rehydrate)
if is_scheduled_for_deferred_delete is not None:
pulumi.set(__self__, "is_scheduled_for_deferred_delete", is_scheduled_for_deferred_delete)
if kpis_healths is not None:
pulumi.set(__self__, "kpis_healths", kpis_healths)
if last_backup_status is not None:
pulumi.set(__self__, "last_backup_status", last_backup_status)
if last_backup_time is not None:
pulumi.set(__self__, "last_backup_time", last_backup_time)
if last_recovery_point is not None:
pulumi.set(__self__, "last_recovery_point", last_recovery_point)
if parent_name is not None:
pulumi.set(__self__, "parent_name", parent_name)
if parent_type is not None:
pulumi.set(__self__, "parent_type", parent_type)
if policy_id is not None:
pulumi.set(__self__, "policy_id", policy_id)
if protected_item_data_source_id is not None:
pulumi.set(__self__, "protected_item_data_source_id", protected_item_data_source_id)
if protected_item_health_status is not None:
pulumi.set(__self__, "protected_item_health_status", protected_item_health_status)
if protection_state is not None:
pulumi.set(__self__, "protection_state", protection_state)
if protection_status is not None:
pulumi.set(__self__, "protection_status", protection_status)
if server_name is not None:
pulumi.set(__self__, "server_name", server_name)
if source_resource_id is not None:
pulumi.set(__self__, "source_resource_id", source_resource_id)
if workload_type is not None:
pulumi.set(__self__, "workload_type", workload_type)
@property
@pulumi.getter(name="protectedItemType")
def protected_item_type(self) -> pulumi.Input[str]:
"""
backup item type.
Expected value is 'AzureVmWorkloadSAPHanaDatabase'.
"""
return pulumi.get(self, "protected_item_type")
@protected_item_type.setter
def protected_item_type(self, value: pulumi.Input[str]):
pulumi.set(self, "protected_item_type", value)
@property
@pulumi.getter(name="backupManagementType")
def backup_management_type(self) -> Optional[pulumi.Input[Union[str, 'BackupManagementType']]]:
"""
Type of backup management for the backed up item.
"""
return pulumi.get(self, "backup_management_type")
@backup_management_type.setter
def backup_management_type(self, value: Optional[pulumi.Input[Union[str, 'BackupManagementType']]]):
pulumi.set(self, "backup_management_type", value)
@property
@pulumi.getter(name="backupSetName")
def backup_set_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the backup set the backup item belongs to
"""
return pulumi.get(self, "backup_set_name")
@backup_set_name.setter
def backup_set_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "backup_set_name", value)
@property
@pulumi.getter(name="containerName")
def container_name(self) -> Optional[pulumi.Input[str]]:
"""
Unique name of container
"""
return pulumi.get(self, "container_name")
@container_name.setter
def container_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "container_name", value)
@property
@pulumi.getter(name="createMode")
def create_mode(self) -> Optional[pulumi.Input[Union[str, 'CreateMode']]]:
"""
Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
"""
return pulumi.get(self, "create_mode")
@create_mode.setter
def create_mode(self, value: Optional[pulumi.Input[Union[str, 'CreateMode']]]):
pulumi.set(self, "create_mode", value)
@property
@pulumi.getter(name="deferredDeleteTimeInUTC")
def deferred_delete_time_in_utc(self) -> Optional[pulumi.Input[str]]:
"""
Time for deferred deletion in UTC
"""
return pulumi.get(self, "deferred_delete_time_in_utc")
@deferred_delete_time_in_utc.setter
def deferred_delete_time_in_utc(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "deferred_delete_time_in_utc", value)
@property
@pulumi.getter(name="deferredDeleteTimeRemaining")
def deferred_delete_time_remaining(self) -> Optional[pulumi.Input[str]]:
"""
Time remaining before the DS marked for deferred delete is permanently deleted
"""
return pulumi.get(self, "deferred_delete_time_remaining")
@deferred_delete_time_remaining.setter
def deferred_delete_time_remaining(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "deferred_delete_time_remaining", value)
@property
@pulumi.getter(name="extendedInfo")
def extended_info(self) -> Optional[pulumi.Input['AzureVmWorkloadProtectedItemExtendedInfoArgs']]:
"""
Additional information for this backup item.
"""
return pulumi.get(self, "extended_info")
@extended_info.setter
def extended_info(self, value: Optional[pulumi.Input['AzureVmWorkloadProtectedItemExtendedInfoArgs']]):
pulumi.set(self, "extended_info", value)
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> Optional[pulumi.Input[str]]:
"""
Friendly name of the DB represented by this backup item.
"""
return pulumi.get(self, "friendly_name")
@friendly_name.setter
def friendly_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "friendly_name", value)
# Accessor pair; @pulumi.getter maps this to the service-side name "isDeferredDeleteScheduleUpcoming".
@property
@pulumi.getter(name="isDeferredDeleteScheduleUpcoming")
def is_deferred_delete_schedule_upcoming(self) -> Optional[pulumi.Input[bool]]:
    """
    Flag to identify whether the deferred deleted DS is to be purged soon
    """
    return pulumi.get(self, "is_deferred_delete_schedule_upcoming")

@is_deferred_delete_schedule_upcoming.setter
def is_deferred_delete_schedule_upcoming(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "is_deferred_delete_schedule_upcoming", value)
# Accessor pair; @pulumi.getter maps this to the service-side name "isRehydrate".
@property
@pulumi.getter(name="isRehydrate")
def is_rehydrate(self) -> Optional[pulumi.Input[bool]]:
    """
    Flag to identify that deferred deleted DS is to be moved into Pause state
    """
    return pulumi.get(self, "is_rehydrate")

@is_rehydrate.setter
def is_rehydrate(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "is_rehydrate", value)
# Accessor pair; @pulumi.getter maps this to the service-side name "isScheduledForDeferredDelete".
@property
@pulumi.getter(name="isScheduledForDeferredDelete")
def is_scheduled_for_deferred_delete(self) -> Optional[pulumi.Input[bool]]:
    """
    Flag to identify whether the DS is scheduled for deferred delete
    """
    return pulumi.get(self, "is_scheduled_for_deferred_delete")

@is_scheduled_for_deferred_delete.setter
def is_scheduled_for_deferred_delete(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "is_scheduled_for_deferred_delete", value)
# Accessor pair; @pulumi.getter maps this to the service-side name "kpisHealths".
@property
@pulumi.getter(name="kpisHealths")
def kpis_healths(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['KPIResourceHealthDetailsArgs']]]]:
    """
    Health details of different KPIs
    """
    return pulumi.get(self, "kpis_healths")

@kpis_healths.setter
def kpis_healths(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['KPIResourceHealthDetailsArgs']]]]):
    pulumi.set(self, "kpis_healths", value)
# Accessor pair; @pulumi.getter maps this to the service-side name "lastBackupStatus".
@property
@pulumi.getter(name="lastBackupStatus")
def last_backup_status(self) -> Optional[pulumi.Input[Union[str, 'LastBackupStatus']]]:
    """
    Last backup operation status. Possible values: Healthy, Unhealthy.
    """
    return pulumi.get(self, "last_backup_status")

@last_backup_status.setter
def last_backup_status(self, value: Optional[pulumi.Input[Union[str, 'LastBackupStatus']]]):
    pulumi.set(self, "last_backup_status", value)
# Accessor pair; @pulumi.getter maps this to the service-side name "lastBackupTime".
@property
@pulumi.getter(name="lastBackupTime")
def last_backup_time(self) -> Optional[pulumi.Input[str]]:
    """
    Timestamp of the last backup operation on this backup item.
    """
    return pulumi.get(self, "last_backup_time")

@last_backup_time.setter
def last_backup_time(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "last_backup_time", value)
# Accessor pair; @pulumi.getter maps this to the service-side name "lastRecoveryPoint".
@property
@pulumi.getter(name="lastRecoveryPoint")
def last_recovery_point(self) -> Optional[pulumi.Input[str]]:
    """
    Timestamp when the last (latest) backup copy was created for this backup item.
    """
    return pulumi.get(self, "last_recovery_point")

@last_recovery_point.setter
def last_recovery_point(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "last_recovery_point", value)
# Accessor pair; @pulumi.getter maps this to the service-side name "parentName".
@property
@pulumi.getter(name="parentName")
def parent_name(self) -> Optional[pulumi.Input[str]]:
    """
    Parent name of the DB such as Instance or Availability Group.
    """
    return pulumi.get(self, "parent_name")

@parent_name.setter
def parent_name(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "parent_name", value)
# Accessor pair; @pulumi.getter maps this to the service-side name "parentType".
@property
@pulumi.getter(name="parentType")
def parent_type(self) -> Optional[pulumi.Input[str]]:
    """
    Parent type of protected item, example: for a DB, standalone server or distributed
    """
    return pulumi.get(self, "parent_type")

@parent_type.setter
def parent_type(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "parent_type", value)
# Accessor pair; @pulumi.getter maps this to the service-side name "policyId".
@property
@pulumi.getter(name="policyId")
def policy_id(self) -> Optional[pulumi.Input[str]]:
    """
    ID of the backup policy with which this item is backed up.
    """
    return pulumi.get(self, "policy_id")

@policy_id.setter
def policy_id(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "policy_id", value)
# Accessor pair; @pulumi.getter maps this to the service-side name "protectedItemDataSourceId".
@property
@pulumi.getter(name="protectedItemDataSourceId")
def protected_item_data_source_id(self) -> Optional[pulumi.Input[str]]:
    """
    Data ID of the protected item.
    """
    return pulumi.get(self, "protected_item_data_source_id")

@protected_item_data_source_id.setter
def protected_item_data_source_id(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "protected_item_data_source_id", value)
# Accessor pair; @pulumi.getter maps this to the service-side name "protectedItemHealthStatus".
@property
@pulumi.getter(name="protectedItemHealthStatus")
def protected_item_health_status(self) -> Optional[pulumi.Input[Union[str, 'ProtectedItemHealthStatus']]]:
    """
    Health status of the backup item, evaluated based on last heartbeat received
    """
    return pulumi.get(self, "protected_item_health_status")

@protected_item_health_status.setter
def protected_item_health_status(self, value: Optional[pulumi.Input[Union[str, 'ProtectedItemHealthStatus']]]):
    pulumi.set(self, "protected_item_health_status", value)
# Accessor pair; @pulumi.getter maps this to the service-side name "protectionState".
@property
@pulumi.getter(name="protectionState")
def protection_state(self) -> Optional[pulumi.Input[Union[str, 'ProtectionState']]]:
    """
    Backup state of this backup item.
    """
    return pulumi.get(self, "protection_state")

@protection_state.setter
def protection_state(self, value: Optional[pulumi.Input[Union[str, 'ProtectionState']]]):
    pulumi.set(self, "protection_state", value)
# Accessor pair; @pulumi.getter maps this to the service-side name "protectionStatus".
@property
@pulumi.getter(name="protectionStatus")
def protection_status(self) -> Optional[pulumi.Input[str]]:
    """
    Backup status of this backup item.
    """
    return pulumi.get(self, "protection_status")

@protection_status.setter
def protection_status(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "protection_status", value)
# Accessor pair; @pulumi.getter maps this to the service-side name "serverName".
@property
@pulumi.getter(name="serverName")
def server_name(self) -> Optional[pulumi.Input[str]]:
    """
    Host/Cluster Name for instance or AG
    """
    return pulumi.get(self, "server_name")

@server_name.setter
def server_name(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "server_name", value)
# Accessor pair; @pulumi.getter maps this to the service-side name "sourceResourceId".
@property
@pulumi.getter(name="sourceResourceId")
def source_resource_id(self) -> Optional[pulumi.Input[str]]:
    """
    ARM ID of the resource to be backed up.
    """
    return pulumi.get(self, "source_resource_id")

@source_resource_id.setter
def source_resource_id(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "source_resource_id", value)
# Accessor pair; @pulumi.getter maps this to the service-side name "workloadType".
@property
@pulumi.getter(name="workloadType")
def workload_type(self) -> Optional[pulumi.Input[Union[str, 'DataSourceType']]]:
    """
    Type of workload this item represents.
    """
    return pulumi.get(self, "workload_type")

@workload_type.setter
def workload_type(self, value: Optional[pulumi.Input[Union[str, 'DataSourceType']]]):
    pulumi.set(self, "workload_type", value)
@pulumi.input_type
class AzureVmWorkloadSQLDatabaseProtectedItemArgs:
    def __init__(__self__, *,
                 protected_item_type: pulumi.Input[str],
                 backup_management_type: Optional[pulumi.Input[Union[str, 'BackupManagementType']]] = None,
                 backup_set_name: Optional[pulumi.Input[str]] = None,
                 container_name: Optional[pulumi.Input[str]] = None,
                 create_mode: Optional[pulumi.Input[Union[str, 'CreateMode']]] = None,
                 deferred_delete_time_in_utc: Optional[pulumi.Input[str]] = None,
                 deferred_delete_time_remaining: Optional[pulumi.Input[str]] = None,
                 extended_info: Optional[pulumi.Input['AzureVmWorkloadProtectedItemExtendedInfoArgs']] = None,
                 friendly_name: Optional[pulumi.Input[str]] = None,
                 is_deferred_delete_schedule_upcoming: Optional[pulumi.Input[bool]] = None,
                 is_rehydrate: Optional[pulumi.Input[bool]] = None,
                 is_scheduled_for_deferred_delete: Optional[pulumi.Input[bool]] = None,
                 kpis_healths: Optional[pulumi.Input[Mapping[str, pulumi.Input['KPIResourceHealthDetailsArgs']]]] = None,
                 last_backup_status: Optional[pulumi.Input[Union[str, 'LastBackupStatus']]] = None,
                 last_backup_time: Optional[pulumi.Input[str]] = None,
                 last_recovery_point: Optional[pulumi.Input[str]] = None,
                 parent_name: Optional[pulumi.Input[str]] = None,
                 parent_type: Optional[pulumi.Input[str]] = None,
                 policy_id: Optional[pulumi.Input[str]] = None,
                 protected_item_data_source_id: Optional[pulumi.Input[str]] = None,
                 protected_item_health_status: Optional[pulumi.Input[Union[str, 'ProtectedItemHealthStatus']]] = None,
                 protection_state: Optional[pulumi.Input[Union[str, 'ProtectionState']]] = None,
                 protection_status: Optional[pulumi.Input[str]] = None,
                 server_name: Optional[pulumi.Input[str]] = None,
                 source_resource_id: Optional[pulumi.Input[str]] = None,
                 workload_type: Optional[pulumi.Input[Union[str, 'DataSourceType']]] = None):
        """
        Azure VM workload-specific protected item representing SQL Database.
        :param pulumi.Input[str] protected_item_type: backup item type.
               Expected value is 'AzureVmWorkloadSQLDatabase'.
        :param pulumi.Input[Union[str, 'BackupManagementType']] backup_management_type: Type of backup management for the backed up item.
        :param pulumi.Input[str] backup_set_name: Name of the backup set the backup item belongs to
        :param pulumi.Input[str] container_name: Unique name of container
        :param pulumi.Input[Union[str, 'CreateMode']] create_mode: Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
        :param pulumi.Input[str] deferred_delete_time_in_utc: Time for deferred deletion in UTC
        :param pulumi.Input[str] deferred_delete_time_remaining: Time remaining before the DS marked for deferred delete is permanently deleted
        :param pulumi.Input['AzureVmWorkloadProtectedItemExtendedInfoArgs'] extended_info: Additional information for this backup item.
        :param pulumi.Input[str] friendly_name: Friendly name of the DB represented by this backup item.
        :param pulumi.Input[bool] is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS is to be purged soon
        :param pulumi.Input[bool] is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause state
        :param pulumi.Input[bool] is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for deferred delete
        :param pulumi.Input[Mapping[str, pulumi.Input['KPIResourceHealthDetailsArgs']]] kpis_healths: Health details of different KPIs
        :param pulumi.Input[Union[str, 'LastBackupStatus']] last_backup_status: Last backup operation status. Possible values: Healthy, Unhealthy.
        :param pulumi.Input[str] last_backup_time: Timestamp of the last backup operation on this backup item.
        :param pulumi.Input[str] last_recovery_point: Timestamp when the last (latest) backup copy was created for this backup item.
        :param pulumi.Input[str] parent_name: Parent name of the DB such as Instance or Availability Group.
        :param pulumi.Input[str] parent_type: Parent type of protected item, example: for a DB, standalone server or distributed
        :param pulumi.Input[str] policy_id: ID of the backup policy with which this item is backed up.
        :param pulumi.Input[str] protected_item_data_source_id: Data ID of the protected item.
        :param pulumi.Input[Union[str, 'ProtectedItemHealthStatus']] protected_item_health_status: Health status of the backup item, evaluated based on last heartbeat received
        :param pulumi.Input[Union[str, 'ProtectionState']] protection_state: Backup state of this backup item.
        :param pulumi.Input[str] protection_status: Backup status of this backup item.
        :param pulumi.Input[str] server_name: Host/Cluster Name for instance or AG
        :param pulumi.Input[str] source_resource_id: ARM ID of the resource to be backed up.
        :param pulumi.Input[Union[str, 'DataSourceType']] workload_type: Type of workload this item represents.
        """
        # The discriminator value is fixed for this variant.
        pulumi.set(__self__, "protected_item_type", 'AzureVmWorkloadSQLDatabase')
        # Record only the optional arguments that were actually supplied.  Dict
        # insertion order preserves the original per-argument assignment order.
        optionals = {
            "backup_management_type": backup_management_type,
            "backup_set_name": backup_set_name,
            "container_name": container_name,
            "create_mode": create_mode,
            "deferred_delete_time_in_utc": deferred_delete_time_in_utc,
            "deferred_delete_time_remaining": deferred_delete_time_remaining,
            "extended_info": extended_info,
            "friendly_name": friendly_name,
            "is_deferred_delete_schedule_upcoming": is_deferred_delete_schedule_upcoming,
            "is_rehydrate": is_rehydrate,
            "is_scheduled_for_deferred_delete": is_scheduled_for_deferred_delete,
            "kpis_healths": kpis_healths,
            "last_backup_status": last_backup_status,
            "last_backup_time": last_backup_time,
            "last_recovery_point": last_recovery_point,
            "parent_name": parent_name,
            "parent_type": parent_type,
            "policy_id": policy_id,
            "protected_item_data_source_id": protected_item_data_source_id,
            "protected_item_health_status": protected_item_health_status,
            "protection_state": protection_state,
            "protection_status": protection_status,
            "server_name": server_name,
            "source_resource_id": source_resource_id,
            "workload_type": workload_type,
        }
        for attr_name, attr_value in optionals.items():
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="protectedItemType")
    def protected_item_type(self) -> pulumi.Input[str]:
        """
        backup item type.
        Expected value is 'AzureVmWorkloadSQLDatabase'.
        """
        return pulumi.get(self, "protected_item_type")

    @protected_item_type.setter
    def protected_item_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "protected_item_type", value)

    @property
    @pulumi.getter(name="backupManagementType")
    def backup_management_type(self) -> Optional[pulumi.Input[Union[str, 'BackupManagementType']]]:
        """
        Type of backup management for the backed up item.
        """
        return pulumi.get(self, "backup_management_type")

    @backup_management_type.setter
    def backup_management_type(self, value: Optional[pulumi.Input[Union[str, 'BackupManagementType']]]):
        pulumi.set(self, "backup_management_type", value)

    @property
    @pulumi.getter(name="backupSetName")
    def backup_set_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the backup set the backup item belongs to
        """
        return pulumi.get(self, "backup_set_name")

    @backup_set_name.setter
    def backup_set_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "backup_set_name", value)

    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> Optional[pulumi.Input[str]]:
        """
        Unique name of container
        """
        return pulumi.get(self, "container_name")

    @container_name.setter
    def container_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "container_name", value)

    @property
    @pulumi.getter(name="createMode")
    def create_mode(self) -> Optional[pulumi.Input[Union[str, 'CreateMode']]]:
        """
        Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
        """
        return pulumi.get(self, "create_mode")

    @create_mode.setter
    def create_mode(self, value: Optional[pulumi.Input[Union[str, 'CreateMode']]]):
        pulumi.set(self, "create_mode", value)

    @property
    @pulumi.getter(name="deferredDeleteTimeInUTC")
    def deferred_delete_time_in_utc(self) -> Optional[pulumi.Input[str]]:
        """
        Time for deferred deletion in UTC
        """
        return pulumi.get(self, "deferred_delete_time_in_utc")

    @deferred_delete_time_in_utc.setter
    def deferred_delete_time_in_utc(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "deferred_delete_time_in_utc", value)

    @property
    @pulumi.getter(name="deferredDeleteTimeRemaining")
    def deferred_delete_time_remaining(self) -> Optional[pulumi.Input[str]]:
        """
        Time remaining before the DS marked for deferred delete is permanently deleted
        """
        return pulumi.get(self, "deferred_delete_time_remaining")

    @deferred_delete_time_remaining.setter
    def deferred_delete_time_remaining(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "deferred_delete_time_remaining", value)

    @property
    @pulumi.getter(name="extendedInfo")
    def extended_info(self) -> Optional[pulumi.Input['AzureVmWorkloadProtectedItemExtendedInfoArgs']]:
        """
        Additional information for this backup item.
        """
        return pulumi.get(self, "extended_info")

    @extended_info.setter
    def extended_info(self, value: Optional[pulumi.Input['AzureVmWorkloadProtectedItemExtendedInfoArgs']]):
        pulumi.set(self, "extended_info", value)

    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[pulumi.Input[str]]:
        """
        Friendly name of the DB represented by this backup item.
        """
        return pulumi.get(self, "friendly_name")

    @friendly_name.setter
    def friendly_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "friendly_name", value)

    @property
    @pulumi.getter(name="isDeferredDeleteScheduleUpcoming")
    def is_deferred_delete_schedule_upcoming(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to identify whether the deferred deleted DS is to be purged soon
        """
        return pulumi.get(self, "is_deferred_delete_schedule_upcoming")

    @is_deferred_delete_schedule_upcoming.setter
    def is_deferred_delete_schedule_upcoming(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_deferred_delete_schedule_upcoming", value)

    @property
    @pulumi.getter(name="isRehydrate")
    def is_rehydrate(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to identify that deferred deleted DS is to be moved into Pause state
        """
        return pulumi.get(self, "is_rehydrate")

    @is_rehydrate.setter
    def is_rehydrate(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_rehydrate", value)

    @property
    @pulumi.getter(name="isScheduledForDeferredDelete")
    def is_scheduled_for_deferred_delete(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to identify whether the DS is scheduled for deferred delete
        """
        return pulumi.get(self, "is_scheduled_for_deferred_delete")

    @is_scheduled_for_deferred_delete.setter
    def is_scheduled_for_deferred_delete(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_scheduled_for_deferred_delete", value)

    @property
    @pulumi.getter(name="kpisHealths")
    def kpis_healths(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['KPIResourceHealthDetailsArgs']]]]:
        """
        Health details of different KPIs
        """
        return pulumi.get(self, "kpis_healths")

    @kpis_healths.setter
    def kpis_healths(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['KPIResourceHealthDetailsArgs']]]]):
        pulumi.set(self, "kpis_healths", value)

    @property
    @pulumi.getter(name="lastBackupStatus")
    def last_backup_status(self) -> Optional[pulumi.Input[Union[str, 'LastBackupStatus']]]:
        """
        Last backup operation status. Possible values: Healthy, Unhealthy.
        """
        return pulumi.get(self, "last_backup_status")

    @last_backup_status.setter
    def last_backup_status(self, value: Optional[pulumi.Input[Union[str, 'LastBackupStatus']]]):
        pulumi.set(self, "last_backup_status", value)

    @property
    @pulumi.getter(name="lastBackupTime")
    def last_backup_time(self) -> Optional[pulumi.Input[str]]:
        """
        Timestamp of the last backup operation on this backup item.
        """
        return pulumi.get(self, "last_backup_time")

    @last_backup_time.setter
    def last_backup_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_backup_time", value)

    @property
    @pulumi.getter(name="lastRecoveryPoint")
    def last_recovery_point(self) -> Optional[pulumi.Input[str]]:
        """
        Timestamp when the last (latest) backup copy was created for this backup item.
        """
        return pulumi.get(self, "last_recovery_point")

    @last_recovery_point.setter
    def last_recovery_point(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_recovery_point", value)

    @property
    @pulumi.getter(name="parentName")
    def parent_name(self) -> Optional[pulumi.Input[str]]:
        """
        Parent name of the DB such as Instance or Availability Group.
        """
        return pulumi.get(self, "parent_name")

    @parent_name.setter
    def parent_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "parent_name", value)

    @property
    @pulumi.getter(name="parentType")
    def parent_type(self) -> Optional[pulumi.Input[str]]:
        """
        Parent type of protected item, example: for a DB, standalone server or distributed
        """
        return pulumi.get(self, "parent_type")

    @parent_type.setter
    def parent_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "parent_type", value)

    @property
    @pulumi.getter(name="policyId")
    def policy_id(self) -> Optional[pulumi.Input[str]]:
        """
        ID of the backup policy with which this item is backed up.
        """
        return pulumi.get(self, "policy_id")

    @policy_id.setter
    def policy_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "policy_id", value)

    @property
    @pulumi.getter(name="protectedItemDataSourceId")
    def protected_item_data_source_id(self) -> Optional[pulumi.Input[str]]:
        """
        Data ID of the protected item.
        """
        return pulumi.get(self, "protected_item_data_source_id")

    @protected_item_data_source_id.setter
    def protected_item_data_source_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protected_item_data_source_id", value)

    @property
    @pulumi.getter(name="protectedItemHealthStatus")
    def protected_item_health_status(self) -> Optional[pulumi.Input[Union[str, 'ProtectedItemHealthStatus']]]:
        """
        Health status of the backup item, evaluated based on last heartbeat received
        """
        return pulumi.get(self, "protected_item_health_status")

    @protected_item_health_status.setter
    def protected_item_health_status(self, value: Optional[pulumi.Input[Union[str, 'ProtectedItemHealthStatus']]]):
        pulumi.set(self, "protected_item_health_status", value)

    @property
    @pulumi.getter(name="protectionState")
    def protection_state(self) -> Optional[pulumi.Input[Union[str, 'ProtectionState']]]:
        """
        Backup state of this backup item.
        """
        return pulumi.get(self, "protection_state")

    @protection_state.setter
    def protection_state(self, value: Optional[pulumi.Input[Union[str, 'ProtectionState']]]):
        pulumi.set(self, "protection_state", value)

    @property
    @pulumi.getter(name="protectionStatus")
    def protection_status(self) -> Optional[pulumi.Input[str]]:
        """
        Backup status of this backup item.
        """
        return pulumi.get(self, "protection_status")

    @protection_status.setter
    def protection_status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protection_status", value)

    @property
    @pulumi.getter(name="serverName")
    def server_name(self) -> Optional[pulumi.Input[str]]:
        """
        Host/Cluster Name for instance or AG
        """
        return pulumi.get(self, "server_name")

    @server_name.setter
    def server_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "server_name", value)

    @property
    @pulumi.getter(name="sourceResourceId")
    def source_resource_id(self) -> Optional[pulumi.Input[str]]:
        """
        ARM ID of the resource to be backed up.
        """
        return pulumi.get(self, "source_resource_id")

    @source_resource_id.setter
    def source_resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "source_resource_id", value)

    @property
    @pulumi.getter(name="workloadType")
    def workload_type(self) -> Optional[pulumi.Input[Union[str, 'DataSourceType']]]:
        """
        Type of workload this item represents.
        """
        return pulumi.get(self, "workload_type")

    @workload_type.setter
    def workload_type(self, value: Optional[pulumi.Input[Union[str, 'DataSourceType']]]):
        pulumi.set(self, "workload_type", value)
@pulumi.input_type
class DPMProtectedItemArgs:
def __init__(__self__, *,
protected_item_type: pulumi.Input[str],
backup_engine_name: Optional[pulumi.Input[str]] = None,
backup_management_type: Optional[pulumi.Input[Union[str, 'BackupManagementType']]] = None,
backup_set_name: Optional[pulumi.Input[str]] = None,
container_name: Optional[pulumi.Input[str]] = None,
create_mode: Optional[pulumi.Input[Union[str, 'CreateMode']]] = None,
deferred_delete_time_in_utc: Optional[pulumi.Input[str]] = None,
deferred_delete_time_remaining: Optional[pulumi.Input[str]] = None,
extended_info: Optional[pulumi.Input['DPMProtectedItemExtendedInfoArgs']] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
is_deferred_delete_schedule_upcoming: Optional[pulumi.Input[bool]] = None,
is_rehydrate: Optional[pulumi.Input[bool]] = None,
is_scheduled_for_deferred_delete: Optional[pulumi.Input[bool]] = None,
last_recovery_point: Optional[pulumi.Input[str]] = None,
policy_id: Optional[pulumi.Input[str]] = None,
protection_state: Optional[pulumi.Input[Union[str, 'ProtectedItemState']]] = None,
source_resource_id: Optional[pulumi.Input[str]] = None,
workload_type: Optional[pulumi.Input[Union[str, 'DataSourceType']]] = None):
"""
Additional information on Backup engine specific backup item.
:param pulumi.Input[str] protected_item_type: backup item type.
Expected value is 'DPMProtectedItem'.
:param pulumi.Input[str] backup_engine_name: Backup Management server protecting this backup item
:param pulumi.Input[Union[str, 'BackupManagementType']] backup_management_type: Type of backup management for the backed up item.
:param pulumi.Input[str] backup_set_name: Name of the backup set the backup item belongs to
:param pulumi.Input[str] container_name: Unique name of container
:param pulumi.Input[Union[str, 'CreateMode']] create_mode: Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
:param pulumi.Input[str] deferred_delete_time_in_utc: Time for deferred deletion in UTC
:param pulumi.Input[str] deferred_delete_time_remaining: Time remaining before the DS marked for deferred delete is permanently deleted
:param pulumi.Input['DPMProtectedItemExtendedInfoArgs'] extended_info: Extended info of the backup item.
:param pulumi.Input[str] friendly_name: Friendly name of the managed item
:param pulumi.Input[bool] is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS is to be purged soon
:param pulumi.Input[bool] is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause state
:param pulumi.Input[bool] is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for deferred delete
:param pulumi.Input[str] last_recovery_point: Timestamp when the last (latest) backup copy was created for this backup item.
:param pulumi.Input[str] policy_id: ID of the backup policy with which this item is backed up.
:param pulumi.Input[Union[str, 'ProtectedItemState']] protection_state: Protection state of the backup engine
:param pulumi.Input[str] source_resource_id: ARM ID of the resource to be backed up.
:param pulumi.Input[Union[str, 'DataSourceType']] workload_type: Type of workload this item represents.
"""
pulumi.set(__self__, "protected_item_type", 'DPMProtectedItem')
if backup_engine_name is not None:
pulumi.set(__self__, "backup_engine_name", backup_engine_name)
if backup_management_type is not None:
pulumi.set(__self__, "backup_management_type", backup_management_type)
if backup_set_name is not None:
pulumi.set(__self__, "backup_set_name", backup_set_name)
if container_name is not None:
pulumi.set(__self__, "container_name", container_name)
if create_mode is not None:
pulumi.set(__self__, "create_mode", create_mode)
if deferred_delete_time_in_utc is not None:
pulumi.set(__self__, "deferred_delete_time_in_utc", deferred_delete_time_in_utc)
if deferred_delete_time_remaining is not None:
pulumi.set(__self__, "deferred_delete_time_remaining", deferred_delete_time_remaining)
if extended_info is not None:
pulumi.set(__self__, "extended_info", extended_info)
if friendly_name is not None:
pulumi.set(__self__, "friendly_name", friendly_name)
if is_deferred_delete_schedule_upcoming is not None:
pulumi.set(__self__, "is_deferred_delete_schedule_upcoming", is_deferred_delete_schedule_upcoming)
if is_rehydrate is not None:
pulumi.set(__self__, "is_rehydrate", is_rehydrate)
if is_scheduled_for_deferred_delete is not None:
pulumi.set(__self__, "is_scheduled_for_deferred_delete", is_scheduled_for_deferred_delete)
if last_recovery_point is not None:
pulumi.set(__self__, "last_recovery_point", last_recovery_point)
if policy_id is not None:
pulumi.set(__self__, "policy_id", policy_id)
if protection_state is not None:
pulumi.set(__self__, "protection_state", protection_state)
if source_resource_id is not None:
pulumi.set(__self__, "source_resource_id", source_resource_id)
if workload_type is not None:
pulumi.set(__self__, "workload_type", workload_type)
@property
@pulumi.getter(name="protectedItemType")
def protected_item_type(self) -> pulumi.Input[str]:
"""
backup item type.
Expected value is 'DPMProtectedItem'.
"""
return pulumi.get(self, "protected_item_type")
@protected_item_type.setter
def protected_item_type(self, value: pulumi.Input[str]):
pulumi.set(self, "protected_item_type", value)
@property
@pulumi.getter(name="backupEngineName")
def backup_engine_name(self) -> Optional[pulumi.Input[str]]:
"""
Backup Management server protecting this backup item
"""
return pulumi.get(self, "backup_engine_name")
@backup_engine_name.setter
def backup_engine_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "backup_engine_name", value)
@property
@pulumi.getter(name="backupManagementType")
def backup_management_type(self) -> Optional[pulumi.Input[Union[str, 'BackupManagementType']]]:
"""
Type of backup management for the backed up item.
"""
return pulumi.get(self, "backup_management_type")
@backup_management_type.setter
def backup_management_type(self, value: Optional[pulumi.Input[Union[str, 'BackupManagementType']]]):
pulumi.set(self, "backup_management_type", value)
@property
@pulumi.getter(name="backupSetName")
def backup_set_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the backup set the backup item belongs to
"""
return pulumi.get(self, "backup_set_name")
@backup_set_name.setter
def backup_set_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "backup_set_name", value)
@property
@pulumi.getter(name="containerName")
def container_name(self) -> Optional[pulumi.Input[str]]:
"""
Unique name of container
"""
return pulumi.get(self, "container_name")
@container_name.setter
def container_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "container_name", value)
@property
@pulumi.getter(name="createMode")
def create_mode(self) -> Optional[pulumi.Input[Union[str, 'CreateMode']]]:
"""
Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
"""
return pulumi.get(self, "create_mode")
@create_mode.setter
def create_mode(self, value: Optional[pulumi.Input[Union[str, 'CreateMode']]]):
pulumi.set(self, "create_mode", value)
@property
@pulumi.getter(name="deferredDeleteTimeInUTC")
def deferred_delete_time_in_utc(self) -> Optional[pulumi.Input[str]]:
"""
Time for deferred deletion in UTC
"""
return pulumi.get(self, "deferred_delete_time_in_utc")
@deferred_delete_time_in_utc.setter
def deferred_delete_time_in_utc(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "deferred_delete_time_in_utc", value)
@property
@pulumi.getter(name="deferredDeleteTimeRemaining")
def deferred_delete_time_remaining(self) -> Optional[pulumi.Input[str]]:
"""
Time remaining before the DS marked for deferred delete is permanently deleted
"""
return pulumi.get(self, "deferred_delete_time_remaining")
@deferred_delete_time_remaining.setter
def deferred_delete_time_remaining(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "deferred_delete_time_remaining", value)
@property
@pulumi.getter(name="extendedInfo")
def extended_info(self) -> Optional[pulumi.Input['DPMProtectedItemExtendedInfoArgs']]:
"""
Extended info of the backup item.
"""
return pulumi.get(self, "extended_info")
@extended_info.setter
def extended_info(self, value: Optional[pulumi.Input['DPMProtectedItemExtendedInfoArgs']]):
pulumi.set(self, "extended_info", value)
    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[pulumi.Input[str]]:
        """
        Friendly name of the managed item
        """
        return pulumi.get(self, "friendly_name")
    @friendly_name.setter
    def friendly_name(self, value: Optional[pulumi.Input[str]]):
        # Replaces the stored "friendly_name" input value.
        pulumi.set(self, "friendly_name", value)
    @property
    @pulumi.getter(name="isDeferredDeleteScheduleUpcoming")
    def is_deferred_delete_schedule_upcoming(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to identify whether the deferred deleted DS is to be purged soon
        """
        return pulumi.get(self, "is_deferred_delete_schedule_upcoming")
    @is_deferred_delete_schedule_upcoming.setter
    def is_deferred_delete_schedule_upcoming(self, value: Optional[pulumi.Input[bool]]):
        # Replaces the stored "is_deferred_delete_schedule_upcoming" input value.
        pulumi.set(self, "is_deferred_delete_schedule_upcoming", value)
    @property
    @pulumi.getter(name="isRehydrate")
    def is_rehydrate(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to identify that deferred deleted DS is to be moved into Pause state
        """
        return pulumi.get(self, "is_rehydrate")
    @is_rehydrate.setter
    def is_rehydrate(self, value: Optional[pulumi.Input[bool]]):
        # Replaces the stored "is_rehydrate" input value.
        pulumi.set(self, "is_rehydrate", value)
    @property
    @pulumi.getter(name="isScheduledForDeferredDelete")
    def is_scheduled_for_deferred_delete(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to identify whether the DS is scheduled for deferred delete
        """
        return pulumi.get(self, "is_scheduled_for_deferred_delete")
    @is_scheduled_for_deferred_delete.setter
    def is_scheduled_for_deferred_delete(self, value: Optional[pulumi.Input[bool]]):
        # Replaces the stored "is_scheduled_for_deferred_delete" input value.
        pulumi.set(self, "is_scheduled_for_deferred_delete", value)
    @property
    @pulumi.getter(name="lastRecoveryPoint")
    def last_recovery_point(self) -> Optional[pulumi.Input[str]]:
        """
        Timestamp when the last (latest) backup copy was created for this backup item.
        """
        return pulumi.get(self, "last_recovery_point")
    @last_recovery_point.setter
    def last_recovery_point(self, value: Optional[pulumi.Input[str]]):
        # Replaces the stored "last_recovery_point" input value.
        pulumi.set(self, "last_recovery_point", value)
    @property
    @pulumi.getter(name="policyId")
    def policy_id(self) -> Optional[pulumi.Input[str]]:
        """
        ID of the backup policy with which this item is backed up.
        """
        return pulumi.get(self, "policy_id")
    @policy_id.setter
    def policy_id(self, value: Optional[pulumi.Input[str]]):
        # Replaces the stored "policy_id" input value.
        pulumi.set(self, "policy_id", value)
    @property
    @pulumi.getter(name="protectionState")
    def protection_state(self) -> Optional[pulumi.Input[Union[str, 'ProtectedItemState']]]:
        """
        Protection state of the backup engine
        """
        return pulumi.get(self, "protection_state")
    @protection_state.setter
    def protection_state(self, value: Optional[pulumi.Input[Union[str, 'ProtectedItemState']]]):
        # Replaces the stored "protection_state" input value.
        pulumi.set(self, "protection_state", value)
    @property
    @pulumi.getter(name="sourceResourceId")
    def source_resource_id(self) -> Optional[pulumi.Input[str]]:
        """
        ARM ID of the resource to be backed up.
        """
        return pulumi.get(self, "source_resource_id")
    @source_resource_id.setter
    def source_resource_id(self, value: Optional[pulumi.Input[str]]):
        # Replaces the stored "source_resource_id" input value.
        pulumi.set(self, "source_resource_id", value)
    @property
    @pulumi.getter(name="workloadType")
    def workload_type(self) -> Optional[pulumi.Input[Union[str, 'DataSourceType']]]:
        """
        Type of workload this item represents.
        """
        return pulumi.get(self, "workload_type")
    @workload_type.setter
    def workload_type(self, value: Optional[pulumi.Input[Union[str, 'DataSourceType']]]):
        # Replaces the stored "workload_type" input value.
        pulumi.set(self, "workload_type", value)
@pulumi.input_type
class DPMProtectedItemExtendedInfoArgs:
    """Additional information of a DPM protected (backup) item; all fields are optional inputs."""
    def __init__(__self__, *,
                 disk_storage_used_in_bytes: Optional[pulumi.Input[str]] = None,
                 is_collocated: Optional[pulumi.Input[bool]] = None,
                 is_present_on_cloud: Optional[pulumi.Input[bool]] = None,
                 last_backup_status: Optional[pulumi.Input[str]] = None,
                 last_refreshed_at: Optional[pulumi.Input[str]] = None,
                 oldest_recovery_point: Optional[pulumi.Input[str]] = None,
                 on_premise_latest_recovery_point: Optional[pulumi.Input[str]] = None,
                 on_premise_oldest_recovery_point: Optional[pulumi.Input[str]] = None,
                 on_premise_recovery_point_count: Optional[pulumi.Input[int]] = None,
                 protectable_object_load_path: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 protected: Optional[pulumi.Input[bool]] = None,
                 protection_group_name: Optional[pulumi.Input[str]] = None,
                 recovery_point_count: Optional[pulumi.Input[int]] = None,
                 total_disk_storage_size_in_bytes: Optional[pulumi.Input[str]] = None):
        """
        Additional information of DPM Protected item.
        :param pulumi.Input[str] disk_storage_used_in_bytes: Used Disk storage in bytes.
        :param pulumi.Input[bool] is_collocated: To check if backup item is collocated.
        :param pulumi.Input[bool] is_present_on_cloud: To check if backup item is cloud protected.
        :param pulumi.Input[str] last_backup_status: Last backup status information on backup item.
        :param pulumi.Input[str] last_refreshed_at: Last refresh time on backup item.
        :param pulumi.Input[str] oldest_recovery_point: Oldest cloud recovery point time.
        :param pulumi.Input[str] on_premise_latest_recovery_point: latest disk recovery point time.
        :param pulumi.Input[str] on_premise_oldest_recovery_point: Oldest disk recovery point time.
        :param pulumi.Input[int] on_premise_recovery_point_count: disk recovery point count.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] protectable_object_load_path: Attribute to provide information on various DBs.
        :param pulumi.Input[bool] protected: To check if backup item is disk protected.
        :param pulumi.Input[str] protection_group_name: Protection group name of the backup item.
        :param pulumi.Input[int] recovery_point_count: cloud recovery point count.
        :param pulumi.Input[str] total_disk_storage_size_in_bytes: total Disk storage in bytes.
        """
        # Only arguments that were explicitly provided (non-None) are recorded on the args object.
        if disk_storage_used_in_bytes is not None:
            pulumi.set(__self__, "disk_storage_used_in_bytes", disk_storage_used_in_bytes)
        if is_collocated is not None:
            pulumi.set(__self__, "is_collocated", is_collocated)
        if is_present_on_cloud is not None:
            pulumi.set(__self__, "is_present_on_cloud", is_present_on_cloud)
        if last_backup_status is not None:
            pulumi.set(__self__, "last_backup_status", last_backup_status)
        if last_refreshed_at is not None:
            pulumi.set(__self__, "last_refreshed_at", last_refreshed_at)
        if oldest_recovery_point is not None:
            pulumi.set(__self__, "oldest_recovery_point", oldest_recovery_point)
        if on_premise_latest_recovery_point is not None:
            pulumi.set(__self__, "on_premise_latest_recovery_point", on_premise_latest_recovery_point)
        if on_premise_oldest_recovery_point is not None:
            pulumi.set(__self__, "on_premise_oldest_recovery_point", on_premise_oldest_recovery_point)
        if on_premise_recovery_point_count is not None:
            pulumi.set(__self__, "on_premise_recovery_point_count", on_premise_recovery_point_count)
        if protectable_object_load_path is not None:
            pulumi.set(__self__, "protectable_object_load_path", protectable_object_load_path)
        if protected is not None:
            pulumi.set(__self__, "protected", protected)
        if protection_group_name is not None:
            pulumi.set(__self__, "protection_group_name", protection_group_name)
        if recovery_point_count is not None:
            pulumi.set(__self__, "recovery_point_count", recovery_point_count)
        if total_disk_storage_size_in_bytes is not None:
            pulumi.set(__self__, "total_disk_storage_size_in_bytes", total_disk_storage_size_in_bytes)
    @property
    @pulumi.getter(name="diskStorageUsedInBytes")
    def disk_storage_used_in_bytes(self) -> Optional[pulumi.Input[str]]:
        """
        Used Disk storage in bytes.
        """
        return pulumi.get(self, "disk_storage_used_in_bytes")
    @disk_storage_used_in_bytes.setter
    def disk_storage_used_in_bytes(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "disk_storage_used_in_bytes", value)
    @property
    @pulumi.getter(name="isCollocated")
    def is_collocated(self) -> Optional[pulumi.Input[bool]]:
        """
        To check if backup item is collocated.
        """
        return pulumi.get(self, "is_collocated")
    @is_collocated.setter
    def is_collocated(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_collocated", value)
    @property
    @pulumi.getter(name="isPresentOnCloud")
    def is_present_on_cloud(self) -> Optional[pulumi.Input[bool]]:
        """
        To check if backup item is cloud protected.
        """
        return pulumi.get(self, "is_present_on_cloud")
    @is_present_on_cloud.setter
    def is_present_on_cloud(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_present_on_cloud", value)
    @property
    @pulumi.getter(name="lastBackupStatus")
    def last_backup_status(self) -> Optional[pulumi.Input[str]]:
        """
        Last backup status information on backup item.
        """
        return pulumi.get(self, "last_backup_status")
    @last_backup_status.setter
    def last_backup_status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_backup_status", value)
    @property
    @pulumi.getter(name="lastRefreshedAt")
    def last_refreshed_at(self) -> Optional[pulumi.Input[str]]:
        """
        Last refresh time on backup item.
        """
        return pulumi.get(self, "last_refreshed_at")
    @last_refreshed_at.setter
    def last_refreshed_at(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_refreshed_at", value)
    @property
    @pulumi.getter(name="oldestRecoveryPoint")
    def oldest_recovery_point(self) -> Optional[pulumi.Input[str]]:
        """
        Oldest cloud recovery point time.
        """
        return pulumi.get(self, "oldest_recovery_point")
    @oldest_recovery_point.setter
    def oldest_recovery_point(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "oldest_recovery_point", value)
    @property
    @pulumi.getter(name="onPremiseLatestRecoveryPoint")
    def on_premise_latest_recovery_point(self) -> Optional[pulumi.Input[str]]:
        """
        latest disk recovery point time.
        """
        return pulumi.get(self, "on_premise_latest_recovery_point")
    @on_premise_latest_recovery_point.setter
    def on_premise_latest_recovery_point(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "on_premise_latest_recovery_point", value)
    @property
    @pulumi.getter(name="onPremiseOldestRecoveryPoint")
    def on_premise_oldest_recovery_point(self) -> Optional[pulumi.Input[str]]:
        """
        Oldest disk recovery point time.
        """
        return pulumi.get(self, "on_premise_oldest_recovery_point")
    @on_premise_oldest_recovery_point.setter
    def on_premise_oldest_recovery_point(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "on_premise_oldest_recovery_point", value)
    @property
    @pulumi.getter(name="onPremiseRecoveryPointCount")
    def on_premise_recovery_point_count(self) -> Optional[pulumi.Input[int]]:
        """
        disk recovery point count.
        """
        return pulumi.get(self, "on_premise_recovery_point_count")
    @on_premise_recovery_point_count.setter
    def on_premise_recovery_point_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "on_premise_recovery_point_count", value)
    @property
    @pulumi.getter(name="protectableObjectLoadPath")
    def protectable_object_load_path(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Attribute to provide information on various DBs.
        """
        return pulumi.get(self, "protectable_object_load_path")
    @protectable_object_load_path.setter
    def protectable_object_load_path(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "protectable_object_load_path", value)
    @property
    @pulumi.getter
    def protected(self) -> Optional[pulumi.Input[bool]]:
        """
        To check if backup item is disk protected.
        """
        return pulumi.get(self, "protected")
    @protected.setter
    def protected(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "protected", value)
    @property
    @pulumi.getter(name="protectionGroupName")
    def protection_group_name(self) -> Optional[pulumi.Input[str]]:
        """
        Protection group name of the backup item.
        """
        return pulumi.get(self, "protection_group_name")
    @protection_group_name.setter
    def protection_group_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protection_group_name", value)
    @property
    @pulumi.getter(name="recoveryPointCount")
    def recovery_point_count(self) -> Optional[pulumi.Input[int]]:
        """
        cloud recovery point count.
        """
        return pulumi.get(self, "recovery_point_count")
    @recovery_point_count.setter
    def recovery_point_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "recovery_point_count", value)
    @property
    @pulumi.getter(name="totalDiskStorageSizeInBytes")
    def total_disk_storage_size_in_bytes(self) -> Optional[pulumi.Input[str]]:
        """
        total Disk storage in bytes.
        """
        return pulumi.get(self, "total_disk_storage_size_in_bytes")
    @total_disk_storage_size_in_bytes.setter
    def total_disk_storage_size_in_bytes(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "total_disk_storage_size_in_bytes", value)
@pulumi.input_type
class DiskExclusionPropertiesArgs:
    """Disk LUN inclusion/exclusion settings used when protecting a VM."""

    def __init__(__self__, *,
                 disk_lun_list: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,
                 is_inclusion_list: Optional[pulumi.Input[bool]] = None):
        """
        Disk LUN inclusion/exclusion settings used when protecting a VM.

        :param pulumi.Input[Sequence[pulumi.Input[int]]] disk_lun_list: List of Disks' Logical Unit Numbers (LUN) to be used for VM Protection.
        :param pulumi.Input[bool] is_inclusion_list: Flag to indicate whether DiskLunList is to be included/ excluded from backup.
        """
        # Record only the arguments that were explicitly supplied.
        for key, supplied in (("disk_lun_list", disk_lun_list),
                              ("is_inclusion_list", is_inclusion_list)):
            if supplied is not None:
                pulumi.set(__self__, key, supplied)

    @property
    @pulumi.getter(name="diskLunList")
    def disk_lun_list(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]:
        """List of Disks' Logical Unit Numbers (LUN) to be used for VM Protection."""
        return pulumi.get(self, "disk_lun_list")

    @disk_lun_list.setter
    def disk_lun_list(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]):
        pulumi.set(self, "disk_lun_list", value)

    @property
    @pulumi.getter(name="isInclusionList")
    def is_inclusion_list(self) -> Optional[pulumi.Input[bool]]:
        """Flag to indicate whether DiskLunList is to be included/ excluded from backup."""
        return pulumi.get(self, "is_inclusion_list")

    @is_inclusion_list.setter
    def is_inclusion_list(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_inclusion_list", value)
@pulumi.input_type
class ExtendedPropertiesArgs:
    """Extended Properties for Azure IaasVM Backup."""

    def __init__(__self__, *,
                 disk_exclusion_properties: Optional[pulumi.Input['DiskExclusionPropertiesArgs']] = None):
        """
        Extended Properties for Azure IaasVM Backup.

        :param pulumi.Input['DiskExclusionPropertiesArgs'] disk_exclusion_properties: Extended Properties for Disk Exclusion.
        """
        # Nothing to record when the argument was omitted.
        if disk_exclusion_properties is None:
            return
        pulumi.set(__self__, "disk_exclusion_properties", disk_exclusion_properties)

    @property
    @pulumi.getter(name="diskExclusionProperties")
    def disk_exclusion_properties(self) -> Optional[pulumi.Input['DiskExclusionPropertiesArgs']]:
        """Extended Properties for Disk Exclusion."""
        return pulumi.get(self, "disk_exclusion_properties")

    @disk_exclusion_properties.setter
    def disk_exclusion_properties(self, value: Optional[pulumi.Input['DiskExclusionPropertiesArgs']]):
        pulumi.set(self, "disk_exclusion_properties", value)
@pulumi.input_type
class GenericProtectedItemArgs:
    """Backup item args with a fixed 'GenericProtectedItem' discriminator; all other fields are optional inputs."""
    def __init__(__self__, *,
                 protected_item_type: pulumi.Input[str],
                 backup_management_type: Optional[pulumi.Input[Union[str, 'BackupManagementType']]] = None,
                 backup_set_name: Optional[pulumi.Input[str]] = None,
                 container_name: Optional[pulumi.Input[str]] = None,
                 create_mode: Optional[pulumi.Input[Union[str, 'CreateMode']]] = None,
                 deferred_delete_time_in_utc: Optional[pulumi.Input[str]] = None,
                 deferred_delete_time_remaining: Optional[pulumi.Input[str]] = None,
                 fabric_name: Optional[pulumi.Input[str]] = None,
                 friendly_name: Optional[pulumi.Input[str]] = None,
                 is_deferred_delete_schedule_upcoming: Optional[pulumi.Input[bool]] = None,
                 is_rehydrate: Optional[pulumi.Input[bool]] = None,
                 is_scheduled_for_deferred_delete: Optional[pulumi.Input[bool]] = None,
                 last_recovery_point: Optional[pulumi.Input[str]] = None,
                 policy_id: Optional[pulumi.Input[str]] = None,
                 policy_state: Optional[pulumi.Input[str]] = None,
                 protected_item_id: Optional[pulumi.Input[float]] = None,
                 protection_state: Optional[pulumi.Input[Union[str, 'ProtectionState']]] = None,
                 source_associations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 source_resource_id: Optional[pulumi.Input[str]] = None,
                 workload_type: Optional[pulumi.Input[Union[str, 'DataSourceType']]] = None):
        """
        Base class for backup items.
        :param pulumi.Input[str] protected_item_type: backup item type.
               Expected value is 'GenericProtectedItem'.
        :param pulumi.Input[Union[str, 'BackupManagementType']] backup_management_type: Type of backup management for the backed up item.
        :param pulumi.Input[str] backup_set_name: Name of the backup set the backup item belongs to
        :param pulumi.Input[str] container_name: Unique name of container
        :param pulumi.Input[Union[str, 'CreateMode']] create_mode: Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
        :param pulumi.Input[str] deferred_delete_time_in_utc: Time for deferred deletion in UTC
        :param pulumi.Input[str] deferred_delete_time_remaining: Time remaining before the DS marked for deferred delete is permanently deleted
        :param pulumi.Input[str] fabric_name: Name of this backup item's fabric.
        :param pulumi.Input[str] friendly_name: Friendly name of the container.
        :param pulumi.Input[bool] is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS is to be purged soon
        :param pulumi.Input[bool] is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause state
        :param pulumi.Input[bool] is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for deferred delete
        :param pulumi.Input[str] last_recovery_point: Timestamp when the last (latest) backup copy was created for this backup item.
        :param pulumi.Input[str] policy_id: ID of the backup policy with which this item is backed up.
        :param pulumi.Input[str] policy_state: Indicates consistency of policy object and policy applied to this backup item.
        :param pulumi.Input[float] protected_item_id: Data Plane Service ID of the protected item.
        :param pulumi.Input[Union[str, 'ProtectionState']] protection_state: Backup state of this backup item.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] source_associations: Loosely coupled (type, value) associations (example - parent of a protected item)
        :param pulumi.Input[str] source_resource_id: ARM ID of the resource to be backed up.
        :param pulumi.Input[Union[str, 'DataSourceType']] workload_type: Type of workload this item represents.
        """
        # The discriminator is forced to 'GenericProtectedItem'; the protected_item_type argument is ignored here.
        pulumi.set(__self__, "protected_item_type", 'GenericProtectedItem')
        # Only arguments that were explicitly provided (non-None) are recorded on the args object.
        if backup_management_type is not None:
            pulumi.set(__self__, "backup_management_type", backup_management_type)
        if backup_set_name is not None:
            pulumi.set(__self__, "backup_set_name", backup_set_name)
        if container_name is not None:
            pulumi.set(__self__, "container_name", container_name)
        if create_mode is not None:
            pulumi.set(__self__, "create_mode", create_mode)
        if deferred_delete_time_in_utc is not None:
            pulumi.set(__self__, "deferred_delete_time_in_utc", deferred_delete_time_in_utc)
        if deferred_delete_time_remaining is not None:
            pulumi.set(__self__, "deferred_delete_time_remaining", deferred_delete_time_remaining)
        if fabric_name is not None:
            pulumi.set(__self__, "fabric_name", fabric_name)
        if friendly_name is not None:
            pulumi.set(__self__, "friendly_name", friendly_name)
        if is_deferred_delete_schedule_upcoming is not None:
            pulumi.set(__self__, "is_deferred_delete_schedule_upcoming", is_deferred_delete_schedule_upcoming)
        if is_rehydrate is not None:
            pulumi.set(__self__, "is_rehydrate", is_rehydrate)
        if is_scheduled_for_deferred_delete is not None:
            pulumi.set(__self__, "is_scheduled_for_deferred_delete", is_scheduled_for_deferred_delete)
        if last_recovery_point is not None:
            pulumi.set(__self__, "last_recovery_point", last_recovery_point)
        if policy_id is not None:
            pulumi.set(__self__, "policy_id", policy_id)
        if policy_state is not None:
            pulumi.set(__self__, "policy_state", policy_state)
        if protected_item_id is not None:
            pulumi.set(__self__, "protected_item_id", protected_item_id)
        if protection_state is not None:
            pulumi.set(__self__, "protection_state", protection_state)
        if source_associations is not None:
            pulumi.set(__self__, "source_associations", source_associations)
        if source_resource_id is not None:
            pulumi.set(__self__, "source_resource_id", source_resource_id)
        if workload_type is not None:
            pulumi.set(__self__, "workload_type", workload_type)
    @property
    @pulumi.getter(name="protectedItemType")
    def protected_item_type(self) -> pulumi.Input[str]:
        """
        backup item type.
        Expected value is 'GenericProtectedItem'.
        """
        return pulumi.get(self, "protected_item_type")
    @protected_item_type.setter
    def protected_item_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "protected_item_type", value)
    @property
    @pulumi.getter(name="backupManagementType")
    def backup_management_type(self) -> Optional[pulumi.Input[Union[str, 'BackupManagementType']]]:
        """
        Type of backup management for the backed up item.
        """
        return pulumi.get(self, "backup_management_type")
    @backup_management_type.setter
    def backup_management_type(self, value: Optional[pulumi.Input[Union[str, 'BackupManagementType']]]):
        pulumi.set(self, "backup_management_type", value)
    @property
    @pulumi.getter(name="backupSetName")
    def backup_set_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the backup set the backup item belongs to
        """
        return pulumi.get(self, "backup_set_name")
    @backup_set_name.setter
    def backup_set_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "backup_set_name", value)
    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> Optional[pulumi.Input[str]]:
        """
        Unique name of container
        """
        return pulumi.get(self, "container_name")
    @container_name.setter
    def container_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "container_name", value)
    @property
    @pulumi.getter(name="createMode")
    def create_mode(self) -> Optional[pulumi.Input[Union[str, 'CreateMode']]]:
        """
        Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
        """
        return pulumi.get(self, "create_mode")
    @create_mode.setter
    def create_mode(self, value: Optional[pulumi.Input[Union[str, 'CreateMode']]]):
        pulumi.set(self, "create_mode", value)
    @property
    @pulumi.getter(name="deferredDeleteTimeInUTC")
    def deferred_delete_time_in_utc(self) -> Optional[pulumi.Input[str]]:
        """
        Time for deferred deletion in UTC
        """
        return pulumi.get(self, "deferred_delete_time_in_utc")
    @deferred_delete_time_in_utc.setter
    def deferred_delete_time_in_utc(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "deferred_delete_time_in_utc", value)
    @property
    @pulumi.getter(name="deferredDeleteTimeRemaining")
    def deferred_delete_time_remaining(self) -> Optional[pulumi.Input[str]]:
        """
        Time remaining before the DS marked for deferred delete is permanently deleted
        """
        return pulumi.get(self, "deferred_delete_time_remaining")
    @deferred_delete_time_remaining.setter
    def deferred_delete_time_remaining(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "deferred_delete_time_remaining", value)
    @property
    @pulumi.getter(name="fabricName")
    def fabric_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of this backup item's fabric.
        """
        return pulumi.get(self, "fabric_name")
    @fabric_name.setter
    def fabric_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "fabric_name", value)
    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[pulumi.Input[str]]:
        """
        Friendly name of the container.
        """
        return pulumi.get(self, "friendly_name")
    @friendly_name.setter
    def friendly_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "friendly_name", value)
    @property
    @pulumi.getter(name="isDeferredDeleteScheduleUpcoming")
    def is_deferred_delete_schedule_upcoming(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to identify whether the deferred deleted DS is to be purged soon
        """
        return pulumi.get(self, "is_deferred_delete_schedule_upcoming")
    @is_deferred_delete_schedule_upcoming.setter
    def is_deferred_delete_schedule_upcoming(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_deferred_delete_schedule_upcoming", value)
    @property
    @pulumi.getter(name="isRehydrate")
    def is_rehydrate(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to identify that deferred deleted DS is to be moved into Pause state
        """
        return pulumi.get(self, "is_rehydrate")
    @is_rehydrate.setter
    def is_rehydrate(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_rehydrate", value)
    @property
    @pulumi.getter(name="isScheduledForDeferredDelete")
    def is_scheduled_for_deferred_delete(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to identify whether the DS is scheduled for deferred delete
        """
        return pulumi.get(self, "is_scheduled_for_deferred_delete")
    @is_scheduled_for_deferred_delete.setter
    def is_scheduled_for_deferred_delete(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_scheduled_for_deferred_delete", value)
    @property
    @pulumi.getter(name="lastRecoveryPoint")
    def last_recovery_point(self) -> Optional[pulumi.Input[str]]:
        """
        Timestamp when the last (latest) backup copy was created for this backup item.
        """
        return pulumi.get(self, "last_recovery_point")
    @last_recovery_point.setter
    def last_recovery_point(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_recovery_point", value)
    @property
    @pulumi.getter(name="policyId")
    def policy_id(self) -> Optional[pulumi.Input[str]]:
        """
        ID of the backup policy with which this item is backed up.
        """
        return pulumi.get(self, "policy_id")
    @policy_id.setter
    def policy_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "policy_id", value)
    @property
    @pulumi.getter(name="policyState")
    def policy_state(self) -> Optional[pulumi.Input[str]]:
        """
        Indicates consistency of policy object and policy applied to this backup item.
        """
        return pulumi.get(self, "policy_state")
    @policy_state.setter
    def policy_state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "policy_state", value)
    @property
    @pulumi.getter(name="protectedItemId")
    def protected_item_id(self) -> Optional[pulumi.Input[float]]:
        """
        Data Plane Service ID of the protected item.
        """
        return pulumi.get(self, "protected_item_id")
    @protected_item_id.setter
    def protected_item_id(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "protected_item_id", value)
    @property
    @pulumi.getter(name="protectionState")
    def protection_state(self) -> Optional[pulumi.Input[Union[str, 'ProtectionState']]]:
        """
        Backup state of this backup item.
        """
        return pulumi.get(self, "protection_state")
    @protection_state.setter
    def protection_state(self, value: Optional[pulumi.Input[Union[str, 'ProtectionState']]]):
        pulumi.set(self, "protection_state", value)
    @property
    @pulumi.getter(name="sourceAssociations")
    def source_associations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Loosely coupled (type, value) associations (example - parent of a protected item)
        """
        return pulumi.get(self, "source_associations")
    @source_associations.setter
    def source_associations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "source_associations", value)
    @property
    @pulumi.getter(name="sourceResourceId")
    def source_resource_id(self) -> Optional[pulumi.Input[str]]:
        """
        ARM ID of the resource to be backed up.
        """
        return pulumi.get(self, "source_resource_id")
    @source_resource_id.setter
    def source_resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "source_resource_id", value)
    @property
    @pulumi.getter(name="workloadType")
    def workload_type(self) -> Optional[pulumi.Input[Union[str, 'DataSourceType']]]:
        """
        Type of workload this item represents.
        """
        return pulumi.get(self, "workload_type")
    @workload_type.setter
    def workload_type(self, value: Optional[pulumi.Input[Union[str, 'DataSourceType']]]):
        pulumi.set(self, "workload_type", value)
@pulumi.input_type
class KPIResourceHealthDetailsArgs:
    """KPI Resource Health Details."""

    def __init__(__self__, *,
                 resource_health_status: Optional[pulumi.Input[Union[str, 'ResourceHealthStatus']]] = None):
        """
        KPI Resource Health Details

        :param pulumi.Input[Union[str, 'ResourceHealthStatus']] resource_health_status: Resource Health Status
        """
        # Nothing to record when the argument was omitted.
        if resource_health_status is None:
            return
        pulumi.set(__self__, "resource_health_status", resource_health_status)

    @property
    @pulumi.getter(name="resourceHealthStatus")
    def resource_health_status(self) -> Optional[pulumi.Input[Union[str, 'ResourceHealthStatus']]]:
        """Resource Health Status"""
        return pulumi.get(self, "resource_health_status")

    @resource_health_status.setter
    def resource_health_status(self, value: Optional[pulumi.Input[Union[str, 'ResourceHealthStatus']]]):
        pulumi.set(self, "resource_health_status", value)
@pulumi.input_type
class MabFileFolderProtectedItemArgs:
def __init__(__self__, *,
protected_item_type: pulumi.Input[str],
backup_management_type: Optional[pulumi.Input[Union[str, 'BackupManagementType']]] = None,
backup_set_name: Optional[pulumi.Input[str]] = None,
computer_name: Optional[pulumi.Input[str]] = None,
container_name: Optional[pulumi.Input[str]] = None,
create_mode: Optional[pulumi.Input[Union[str, 'CreateMode']]] = None,
deferred_delete_sync_time_in_utc: Optional[pulumi.Input[float]] = None,
deferred_delete_time_in_utc: Optional[pulumi.Input[str]] = None,
deferred_delete_time_remaining: Optional[pulumi.Input[str]] = None,
extended_info: Optional[pulumi.Input['MabFileFolderProtectedItemExtendedInfoArgs']] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
is_deferred_delete_schedule_upcoming: Optional[pulumi.Input[bool]] = None,
is_rehydrate: Optional[pulumi.Input[bool]] = None,
is_scheduled_for_deferred_delete: Optional[pulumi.Input[bool]] = None,
last_backup_status: Optional[pulumi.Input[str]] = None,
last_backup_time: Optional[pulumi.Input[str]] = None,
last_recovery_point: Optional[pulumi.Input[str]] = None,
policy_id: Optional[pulumi.Input[str]] = None,
protection_state: Optional[pulumi.Input[str]] = None,
source_resource_id: Optional[pulumi.Input[str]] = None,
workload_type: Optional[pulumi.Input[Union[str, 'DataSourceType']]] = None):
"""
MAB workload-specific backup item.
:param pulumi.Input[str] protected_item_type: backup item type.
Expected value is 'MabFileFolderProtectedItem'.
:param pulumi.Input[Union[str, 'BackupManagementType']] backup_management_type: Type of backup management for the backed up item.
:param pulumi.Input[str] backup_set_name: Name of the backup set the backup item belongs to
:param pulumi.Input[str] computer_name: Name of the computer associated with this backup item.
:param pulumi.Input[str] container_name: Unique name of container
:param pulumi.Input[Union[str, 'CreateMode']] create_mode: Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
:param pulumi.Input[float] deferred_delete_sync_time_in_utc: Sync time for deferred deletion in UTC
:param pulumi.Input[str] deferred_delete_time_in_utc: Time for deferred deletion in UTC
:param pulumi.Input[str] deferred_delete_time_remaining: Time remaining before the DS marked for deferred delete is permanently deleted
:param pulumi.Input['MabFileFolderProtectedItemExtendedInfoArgs'] extended_info: Additional information with this backup item.
:param pulumi.Input[str] friendly_name: Friendly name of this backup item.
:param pulumi.Input[bool] is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS is to be purged soon
:param pulumi.Input[bool] is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause state
:param pulumi.Input[bool] is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for deferred delete
:param pulumi.Input[str] last_backup_status: Status of last backup operation.
:param pulumi.Input[str] last_backup_time: Timestamp of the last backup operation on this backup item.
:param pulumi.Input[str] last_recovery_point: Timestamp when the last (latest) backup copy was created for this backup item.
:param pulumi.Input[str] policy_id: ID of the backup policy with which this item is backed up.
:param pulumi.Input[str] protection_state: Protected, ProtectionStopped, IRPending or ProtectionError
:param pulumi.Input[str] source_resource_id: ARM ID of the resource to be backed up.
:param pulumi.Input[Union[str, 'DataSourceType']] workload_type: Type of workload this item represents.
"""
pulumi.set(__self__, "protected_item_type", 'MabFileFolderProtectedItem')
if backup_management_type is not None:
pulumi.set(__self__, "backup_management_type", backup_management_type)
if backup_set_name is not None:
pulumi.set(__self__, "backup_set_name", backup_set_name)
if computer_name is not None:
pulumi.set(__self__, "computer_name", computer_name)
if container_name is not None:
pulumi.set(__self__, "container_name", container_name)
if create_mode is not None:
pulumi.set(__self__, "create_mode", create_mode)
if deferred_delete_sync_time_in_utc is not None:
pulumi.set(__self__, "deferred_delete_sync_time_in_utc", deferred_delete_sync_time_in_utc)
if deferred_delete_time_in_utc is not None:
pulumi.set(__self__, "deferred_delete_time_in_utc", deferred_delete_time_in_utc)
if deferred_delete_time_remaining is not None:
pulumi.set(__self__, "deferred_delete_time_remaining", deferred_delete_time_remaining)
if extended_info is not None:
pulumi.set(__self__, "extended_info", extended_info)
if friendly_name is not None:
pulumi.set(__self__, "friendly_name", friendly_name)
if is_deferred_delete_schedule_upcoming is not None:
pulumi.set(__self__, "is_deferred_delete_schedule_upcoming", is_deferred_delete_schedule_upcoming)
if is_rehydrate is not None:
pulumi.set(__self__, "is_rehydrate", is_rehydrate)
if is_scheduled_for_deferred_delete is not None:
pulumi.set(__self__, "is_scheduled_for_deferred_delete", is_scheduled_for_deferred_delete)
if last_backup_status is not None:
pulumi.set(__self__, "last_backup_status", last_backup_status)
if last_backup_time is not None:
pulumi.set(__self__, "last_backup_time", last_backup_time)
if last_recovery_point is not None:
pulumi.set(__self__, "last_recovery_point", last_recovery_point)
if policy_id is not None:
pulumi.set(__self__, "policy_id", policy_id)
if protection_state is not None:
pulumi.set(__self__, "protection_state", protection_state)
if source_resource_id is not None:
pulumi.set(__self__, "source_resource_id", source_resource_id)
if workload_type is not None:
pulumi.set(__self__, "workload_type", workload_type)
    @property
    @pulumi.getter(name="protectedItemType")
    def protected_item_type(self) -> pulumi.Input[str]:
        """
        backup item type.
        Expected value is 'MabFileFolderProtectedItem'.
        """
        return pulumi.get(self, "protected_item_type")
    @protected_item_type.setter
    def protected_item_type(self, value: pulumi.Input[str]):
        """Set the backup item type. Expected value is 'MabFileFolderProtectedItem'."""
        pulumi.set(self, "protected_item_type", value)
    @property
    @pulumi.getter(name="backupManagementType")
    def backup_management_type(self) -> Optional[pulumi.Input[Union[str, 'BackupManagementType']]]:
        """
        Type of backup management for the backed up item.
        """
        return pulumi.get(self, "backup_management_type")
    @backup_management_type.setter
    def backup_management_type(self, value: Optional[pulumi.Input[Union[str, 'BackupManagementType']]]):
        """Set the type of backup management for the backed up item."""
        pulumi.set(self, "backup_management_type", value)
    @property
    @pulumi.getter(name="backupSetName")
    def backup_set_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the backup set the backup item belongs to
        """
        return pulumi.get(self, "backup_set_name")
    @backup_set_name.setter
    def backup_set_name(self, value: Optional[pulumi.Input[str]]):
        """Set the name of the backup set the backup item belongs to."""
        pulumi.set(self, "backup_set_name", value)
    @property
    @pulumi.getter(name="computerName")
    def computer_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the computer associated with this backup item.
        """
        return pulumi.get(self, "computer_name")
    @computer_name.setter
    def computer_name(self, value: Optional[pulumi.Input[str]]):
        """Set the name of the computer associated with this backup item."""
        pulumi.set(self, "computer_name", value)
    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> Optional[pulumi.Input[str]]:
        """
        Unique name of container
        """
        return pulumi.get(self, "container_name")
    @container_name.setter
    def container_name(self, value: Optional[pulumi.Input[str]]):
        """Set the unique name of the container."""
        pulumi.set(self, "container_name", value)
    @property
    @pulumi.getter(name="createMode")
    def create_mode(self) -> Optional[pulumi.Input[Union[str, 'CreateMode']]]:
        """
        Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
        """
        return pulumi.get(self, "create_mode")
    @create_mode.setter
    def create_mode(self, value: Optional[pulumi.Input[Union[str, 'CreateMode']]]):
        """Set the create mode (recovery of an existing soft-deleted data source vs. creation of a new one)."""
        pulumi.set(self, "create_mode", value)
    @property
    @pulumi.getter(name="deferredDeleteSyncTimeInUTC")
    def deferred_delete_sync_time_in_utc(self) -> Optional[pulumi.Input[float]]:
        """
        Sync time for deferred deletion in UTC
        """
        return pulumi.get(self, "deferred_delete_sync_time_in_utc")
    @deferred_delete_sync_time_in_utc.setter
    def deferred_delete_sync_time_in_utc(self, value: Optional[pulumi.Input[float]]):
        """Set the sync time for deferred deletion in UTC."""
        pulumi.set(self, "deferred_delete_sync_time_in_utc", value)
    @property
    @pulumi.getter(name="deferredDeleteTimeInUTC")
    def deferred_delete_time_in_utc(self) -> Optional[pulumi.Input[str]]:
        """
        Time for deferred deletion in UTC
        """
        return pulumi.get(self, "deferred_delete_time_in_utc")
    @deferred_delete_time_in_utc.setter
    def deferred_delete_time_in_utc(self, value: Optional[pulumi.Input[str]]):
        """Set the time for deferred deletion in UTC."""
        pulumi.set(self, "deferred_delete_time_in_utc", value)
    @property
    @pulumi.getter(name="deferredDeleteTimeRemaining")
    def deferred_delete_time_remaining(self) -> Optional[pulumi.Input[str]]:
        """
        Time remaining before the DS marked for deferred delete is permanently deleted
        """
        return pulumi.get(self, "deferred_delete_time_remaining")
    @deferred_delete_time_remaining.setter
    def deferred_delete_time_remaining(self, value: Optional[pulumi.Input[str]]):
        """Set the time remaining before the DS marked for deferred delete is permanently deleted."""
        pulumi.set(self, "deferred_delete_time_remaining", value)
    @property
    @pulumi.getter(name="extendedInfo")
    def extended_info(self) -> Optional[pulumi.Input['MabFileFolderProtectedItemExtendedInfoArgs']]:
        """
        Additional information with this backup item.
        """
        return pulumi.get(self, "extended_info")
    @extended_info.setter
    def extended_info(self, value: Optional[pulumi.Input['MabFileFolderProtectedItemExtendedInfoArgs']]):
        """Set the additional information for this backup item."""
        pulumi.set(self, "extended_info", value)
    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[pulumi.Input[str]]:
        """
        Friendly name of this backup item.
        """
        return pulumi.get(self, "friendly_name")
    @friendly_name.setter
    def friendly_name(self, value: Optional[pulumi.Input[str]]):
        """Set the friendly name of this backup item."""
        pulumi.set(self, "friendly_name", value)
    @property
    @pulumi.getter(name="isDeferredDeleteScheduleUpcoming")
    def is_deferred_delete_schedule_upcoming(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to identify whether the deferred deleted DS is to be purged soon
        """
        return pulumi.get(self, "is_deferred_delete_schedule_upcoming")
    @is_deferred_delete_schedule_upcoming.setter
    def is_deferred_delete_schedule_upcoming(self, value: Optional[pulumi.Input[bool]]):
        """Set the flag identifying whether the deferred deleted DS is to be purged soon."""
        pulumi.set(self, "is_deferred_delete_schedule_upcoming", value)
    @property
    @pulumi.getter(name="isRehydrate")
    def is_rehydrate(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to identify that deferred deleted DS is to be moved into Pause state
        """
        return pulumi.get(self, "is_rehydrate")
    @is_rehydrate.setter
    def is_rehydrate(self, value: Optional[pulumi.Input[bool]]):
        """Set the flag identifying that the deferred deleted DS is to be moved into Pause state."""
        pulumi.set(self, "is_rehydrate", value)
    @property
    @pulumi.getter(name="isScheduledForDeferredDelete")
    def is_scheduled_for_deferred_delete(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to identify whether the DS is scheduled for deferred delete
        """
        return pulumi.get(self, "is_scheduled_for_deferred_delete")
    @is_scheduled_for_deferred_delete.setter
    def is_scheduled_for_deferred_delete(self, value: Optional[pulumi.Input[bool]]):
        """Set the flag identifying whether the DS is scheduled for deferred delete."""
        pulumi.set(self, "is_scheduled_for_deferred_delete", value)
    @property
    @pulumi.getter(name="lastBackupStatus")
    def last_backup_status(self) -> Optional[pulumi.Input[str]]:
        """
        Status of last backup operation.
        """
        return pulumi.get(self, "last_backup_status")
    @last_backup_status.setter
    def last_backup_status(self, value: Optional[pulumi.Input[str]]):
        """Set the status of the last backup operation."""
        pulumi.set(self, "last_backup_status", value)
    @property
    @pulumi.getter(name="lastBackupTime")
    def last_backup_time(self) -> Optional[pulumi.Input[str]]:
        """
        Timestamp of the last backup operation on this backup item.
        """
        return pulumi.get(self, "last_backup_time")
    @last_backup_time.setter
    def last_backup_time(self, value: Optional[pulumi.Input[str]]):
        """Set the timestamp of the last backup operation on this backup item."""
        pulumi.set(self, "last_backup_time", value)
    @property
    @pulumi.getter(name="lastRecoveryPoint")
    def last_recovery_point(self) -> Optional[pulumi.Input[str]]:
        """
        Timestamp when the last (latest) backup copy was created for this backup item.
        """
        return pulumi.get(self, "last_recovery_point")
    @last_recovery_point.setter
    def last_recovery_point(self, value: Optional[pulumi.Input[str]]):
        """Set the timestamp when the last (latest) backup copy was created for this backup item."""
        pulumi.set(self, "last_recovery_point", value)
    @property
    @pulumi.getter(name="policyId")
    def policy_id(self) -> Optional[pulumi.Input[str]]:
        """
        ID of the backup policy with which this item is backed up.
        """
        return pulumi.get(self, "policy_id")
    @policy_id.setter
    def policy_id(self, value: Optional[pulumi.Input[str]]):
        """Set the ID of the backup policy with which this item is backed up."""
        pulumi.set(self, "policy_id", value)
    @property
    @pulumi.getter(name="protectionState")
    def protection_state(self) -> Optional[pulumi.Input[str]]:
        """
        Protected, ProtectionStopped, IRPending or ProtectionError
        """
        return pulumi.get(self, "protection_state")
    @protection_state.setter
    def protection_state(self, value: Optional[pulumi.Input[str]]):
        """Set the protection state (Protected, ProtectionStopped, IRPending or ProtectionError)."""
        pulumi.set(self, "protection_state", value)
    @property
    @pulumi.getter(name="sourceResourceId")
    def source_resource_id(self) -> Optional[pulumi.Input[str]]:
        """
        ARM ID of the resource to be backed up.
        """
        return pulumi.get(self, "source_resource_id")
    @source_resource_id.setter
    def source_resource_id(self, value: Optional[pulumi.Input[str]]):
        """Set the ARM ID of the resource to be backed up."""
        pulumi.set(self, "source_resource_id", value)
    @property
    @pulumi.getter(name="workloadType")
    def workload_type(self) -> Optional[pulumi.Input[Union[str, 'DataSourceType']]]:
        """
        Type of workload this item represents.
        """
        return pulumi.get(self, "workload_type")
    @workload_type.setter
    def workload_type(self, value: Optional[pulumi.Input[Union[str, 'DataSourceType']]]):
        """Set the type of workload this item represents."""
        pulumi.set(self, "workload_type", value)
@pulumi.input_type
class MabFileFolderProtectedItemExtendedInfoArgs:
    def __init__(__self__, *,
                 last_refreshed_at: Optional[pulumi.Input[str]] = None,
                 oldest_recovery_point: Optional[pulumi.Input[str]] = None,
                 recovery_point_count: Optional[pulumi.Input[int]] = None):
        """
        Additional information on the backed up item.

        :param pulumi.Input[str] last_refreshed_at: Last time when the agent data synced to service.
        :param pulumi.Input[str] oldest_recovery_point: The oldest backup copy available.
        :param pulumi.Input[int] recovery_point_count: Number of backup copies associated with the backup item.
        """
        # Record only the properties that were actually supplied.
        supplied = {
            "last_refreshed_at": last_refreshed_at,
            "oldest_recovery_point": oldest_recovery_point,
            "recovery_point_count": recovery_point_count,
        }
        for prop_name, prop_value in supplied.items():
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter(name="lastRefreshedAt")
    def last_refreshed_at(self) -> Optional[pulumi.Input[str]]:
        """Last time when the agent data synced to service."""
        return pulumi.get(self, "last_refreshed_at")

    @last_refreshed_at.setter
    def last_refreshed_at(self, value: Optional[pulumi.Input[str]]):
        """Set the last time when the agent data synced to service."""
        pulumi.set(self, "last_refreshed_at", value)

    @property
    @pulumi.getter(name="oldestRecoveryPoint")
    def oldest_recovery_point(self) -> Optional[pulumi.Input[str]]:
        """The oldest backup copy available."""
        return pulumi.get(self, "oldest_recovery_point")

    @oldest_recovery_point.setter
    def oldest_recovery_point(self, value: Optional[pulumi.Input[str]]):
        """Set the oldest backup copy available."""
        pulumi.set(self, "oldest_recovery_point", value)

    @property
    @pulumi.getter(name="recoveryPointCount")
    def recovery_point_count(self) -> Optional[pulumi.Input[int]]:
        """Number of backup copies associated with the backup item."""
        return pulumi.get(self, "recovery_point_count")

    @recovery_point_count.setter
    def recovery_point_count(self, value: Optional[pulumi.Input[int]]):
        """Set the number of backup copies associated with the backup item."""
        pulumi.set(self, "recovery_point_count", value)
| 46.344059
| 175
| 0.687004
| 28,843
| 239,089
| 5.440558
| 0.012031
| 0.091198
| 0.107882
| 0.068136
| 0.972203
| 0.961178
| 0.954857
| 0.939957
| 0.932788
| 0.923019
| 0
| 0.000005
| 0.211641
| 239,089
| 5,158
| 176
| 46.353044
| 0.832525
| 0.216601
| 0
| 0.915773
| 1
| 0
| 0.160598
| 0.072188
| 0
| 0
| 0
| 0
| 0
| 1
| 0.201577
| false
| 0
| 0.001893
| 0
| 0.307571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.